hc
2023-12-11 d2ccde1c8e90d38cee87a1b0309ad2827f3fd30d
kernel/kernel/trace/trace_functions_graph.c
@@ -16,33 +16,6 @@
 #include "trace.h"
 #include "trace_output.h"
 
-static bool kill_ftrace_graph;
-
-/**
- * ftrace_graph_is_dead - returns true if ftrace_graph_stop() was called
- *
- * ftrace_graph_stop() is called when a severe error is detected in
- * the function graph tracing. This function is called by the critical
- * paths of function graph to keep those paths from doing any more harm.
- */
-bool ftrace_graph_is_dead(void)
-{
-        return kill_ftrace_graph;
-}
-
-/**
- * ftrace_graph_stop - set to permanently disable function graph tracincg
- *
- * In case of an error int function graph tracing, this is called
- * to try to keep function graph tracing from causing any more harm.
- * Usually this is pretty severe and this is called to try to at least
- * get a warning out to the user.
- */
-void ftrace_graph_stop(void)
-{
-        kill_ftrace_graph = true;
-}
-
 /* When set, irq functions will be ignored */
 static int ftrace_graph_skip_irqs;
 
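The block removed above is the function-graph kill switch documented in its comments: ftrace_graph_stop() latches a flag when a severe error is detected, and the critical paths bail out early by checking ftrace_graph_is_dead() (the removed ftrace_push_return_trace() further down returns -EBUSY when the flag is set). As a reading aid only, a minimal sketch of that latch-and-check pattern with hypothetical names; it is not the kernel's implementation, which this commit simply drops from this file:

        #include <linux/compiler.h>     /* unlikely() */
        #include <linux/errno.h>        /* EBUSY */
        #include <linux/types.h>        /* bool */

        static bool tracer_is_dead;             /* hypothetical kill flag */

        static void tracer_stop(void)           /* called on a fatal inconsistency */
        {
                tracer_is_dead = true;
        }

        static int tracer_hot_path(void)
        {
                if (unlikely(tracer_is_dead))
                        return -EBUSY;          /* refuse to do any more harm */
                /* ... normal tracing work ... */
                return 0;
        }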
@@ -87,8 +60,12 @@
         { TRACER_OPT(funcgraph-tail, TRACE_GRAPH_PRINT_TAIL) },
         /* Include sleep time (scheduled out) between entry and return */
         { TRACER_OPT(sleep-time, TRACE_GRAPH_SLEEP_TIME) },
+
+#ifdef CONFIG_FUNCTION_PROFILER
         /* Include time within nested functions */
         { TRACER_OPT(graph-time, TRACE_GRAPH_GRAPH_TIME) },
+#endif
+
         { } /* Empty entry */
 };
 
@@ -117,270 +94,17 @@
 print_graph_duration(struct trace_array *tr, unsigned long long duration,
                      struct trace_seq *s, u32 flags);
 
-/* Add a function return address to the trace stack on thread info.*/
-static int
-ftrace_push_return_trace(unsigned long ret, unsigned long func,
-                         unsigned long frame_pointer, unsigned long *retp)
-{
-        unsigned long long calltime;
-        int index;
-
-        if (unlikely(ftrace_graph_is_dead()))
-                return -EBUSY;
-
-        if (!current->ret_stack)
-                return -EBUSY;
-
-        /*
-         * We must make sure the ret_stack is tested before we read
-         * anything else.
-         */
-        smp_rmb();
-
-        /* The return trace stack is full */
-        if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
-                atomic_inc(&current->trace_overrun);
-                return -EBUSY;
-        }
-
-        /*
-         * The curr_ret_stack is an index to ftrace return stack of
-         * current task. Its value should be in [0, FTRACE_RETFUNC_
-         * DEPTH) when the function graph tracer is used. To support
-         * filtering out specific functions, it makes the index
-         * negative by subtracting huge value (FTRACE_NOTRACE_DEPTH)
-         * so when it sees a negative index the ftrace will ignore
-         * the record. And the index gets recovered when returning
-         * from the filtered function by adding the FTRACE_NOTRACE_
-         * DEPTH and then it'll continue to record functions normally.
-         *
-         * The curr_ret_stack is initialized to -1 and get increased
-         * in this function. So it can be less than -1 only if it was
-         * filtered out via ftrace_graph_notrace_addr() which can be
-         * set from set_graph_notrace file in tracefs by user.
-         */
-        if (current->curr_ret_stack < -1)
-                return -EBUSY;
-
-        calltime = trace_clock_local();
-
-        index = ++current->curr_ret_stack;
-        if (ftrace_graph_notrace_addr(func))
-                current->curr_ret_stack -= FTRACE_NOTRACE_DEPTH;
-        barrier();
-        current->ret_stack[index].ret = ret;
-        current->ret_stack[index].func = func;
-        current->ret_stack[index].calltime = calltime;
-#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
-        current->ret_stack[index].fp = frame_pointer;
-#endif
-#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
-        current->ret_stack[index].retp = retp;
-#endif
-        return 0;
-}
-
-int function_graph_enter(unsigned long ret, unsigned long func,
-                         unsigned long frame_pointer, unsigned long *retp)
-{
-        struct ftrace_graph_ent trace;
-
-        trace.func = func;
-        trace.depth = ++current->curr_ret_depth;
-
-        if (ftrace_push_return_trace(ret, func,
-                                     frame_pointer, retp))
-                goto out;
-
-        /* Only trace if the calling function expects to */
-        if (!ftrace_graph_entry(&trace))
-                goto out_ret;
-
-        return 0;
- out_ret:
-        current->curr_ret_stack--;
- out:
-        current->curr_ret_depth--;
-        return -EBUSY;
-}
-
-/* Retrieve a function return address to the trace stack on thread info.*/
-static void
-ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
-                        unsigned long frame_pointer)
-{
-        int index;
-
-        index = current->curr_ret_stack;
-
-        /*
-         * A negative index here means that it's just returned from a
-         * notrace'd function. Recover index to get an original
-         * return address. See ftrace_push_return_trace().
-         *
-         * TODO: Need to check whether the stack gets corrupted.
-         */
-        if (index < 0)
-                index += FTRACE_NOTRACE_DEPTH;
-
-        if (unlikely(index < 0 || index >= FTRACE_RETFUNC_DEPTH)) {
-                ftrace_graph_stop();
-                WARN_ON(1);
-                /* Might as well panic, otherwise we have no where to go */
-                *ret = (unsigned long)panic;
-                return;
-        }
-
-#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
-        /*
-         * The arch may choose to record the frame pointer used
-         * and check it here to make sure that it is what we expect it
-         * to be. If gcc does not set the place holder of the return
-         * address in the frame pointer, and does a copy instead, then
-         * the function graph trace will fail. This test detects this
-         * case.
-         *
-         * Currently, x86_32 with optimize for size (-Os) makes the latest
-         * gcc do the above.
-         *
-         * Note, -mfentry does not use frame pointers, and this test
-         * is not needed if CC_USING_FENTRY is set.
-         */
-        if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
-                ftrace_graph_stop();
-                WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
-                     " from func %ps return to %lx\n",
-                     current->ret_stack[index].fp,
-                     frame_pointer,
-                     (void *)current->ret_stack[index].func,
-                     current->ret_stack[index].ret);
-                *ret = (unsigned long)panic;
-                return;
-        }
-#endif
-
-        *ret = current->ret_stack[index].ret;
-        trace->func = current->ret_stack[index].func;
-        trace->calltime = current->ret_stack[index].calltime;
-        trace->overrun = atomic_read(&current->trace_overrun);
-        trace->depth = current->curr_ret_depth--;
-        /*
-         * We still want to trace interrupts coming in if
-         * max_depth is set to 1. Make sure the decrement is
-         * seen before ftrace_graph_return.
-         */
-        barrier();
-}
-
-/*
- * Send the trace to the ring-buffer.
- * @return the original return address.
- */
-unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
-{
-        struct ftrace_graph_ret trace;
-        unsigned long ret;
-
-        ftrace_pop_return_trace(&trace, &ret, frame_pointer);
-        trace.rettime = trace_clock_local();
-        ftrace_graph_return(&trace);
-        /*
-         * The ftrace_graph_return() may still access the current
-         * ret_stack structure, we need to make sure the update of
-         * curr_ret_stack is after that.
-         */
-        barrier();
-        current->curr_ret_stack--;
-        /*
-         * The curr_ret_stack can be less than -1 only if it was
-         * filtered out and it's about to return from the function.
-         * Recover the index and continue to trace normal functions.
-         */
-        if (current->curr_ret_stack < -1) {
-                current->curr_ret_stack += FTRACE_NOTRACE_DEPTH;
-                return ret;
-        }
-
-        if (unlikely(!ret)) {
-                ftrace_graph_stop();
-                WARN_ON(1);
-                /* Might as well panic. What else to do? */
-                ret = (unsigned long)panic;
-        }
-
-        return ret;
-}
-
-/**
- * ftrace_graph_ret_addr - convert a potentially modified stack return address
- *                         to its original value
- *
- * This function can be called by stack unwinding code to convert a found stack
- * return address ('ret') to its original value, in case the function graph
- * tracer has modified it to be 'return_to_handler'. If the address hasn't
- * been modified, the unchanged value of 'ret' is returned.
- *
- * 'idx' is a state variable which should be initialized by the caller to zero
- * before the first call.
- *
- * 'retp' is a pointer to the return address on the stack. It's ignored if
- * the arch doesn't have HAVE_FUNCTION_GRAPH_RET_ADDR_PTR defined.
- */
-#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
-unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
-                                    unsigned long ret, unsigned long *retp)
-{
-        int index = task->curr_ret_stack;
-        int i;
-
-        if (ret != (unsigned long)return_to_handler)
-                return ret;
-
-        if (index < -1)
-                index += FTRACE_NOTRACE_DEPTH;
-
-        if (index < 0)
-                return ret;
-
-        for (i = 0; i <= index; i++)
-                if (task->ret_stack[i].retp == retp)
-                        return task->ret_stack[i].ret;
-
-        return ret;
-}
-#else /* !HAVE_FUNCTION_GRAPH_RET_ADDR_PTR */
-unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
-                                    unsigned long ret, unsigned long *retp)
-{
-        int task_idx;
-
-        if (ret != (unsigned long)return_to_handler)
-                return ret;
-
-        task_idx = task->curr_ret_stack;
-
-        if (!task->ret_stack || task_idx < *idx)
-                return ret;
-
-        task_idx -= *idx;
-        (*idx)++;
-
-        return task->ret_stack[task_idx].ret;
-}
-#endif /* HAVE_FUNCTION_GRAPH_RET_ADDR_PTR */
-
 int __trace_graph_entry(struct trace_array *tr,
                         struct ftrace_graph_ent *trace,
-                        unsigned long flags,
-                        int pc)
+                        unsigned int trace_ctx)
 {
         struct trace_event_call *call = &event_funcgraph_entry;
         struct ring_buffer_event *event;
-        struct ring_buffer *buffer = tr->trace_buffer.buffer;
+        struct trace_buffer *buffer = tr->array_buffer.buffer;
         struct ftrace_graph_ent_entry *entry;
 
         event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
-                                          sizeof(*entry), flags, pc);
+                                          sizeof(*entry), trace_ctx);
         if (!event)
                 return 0;
         entry = ring_buffer_event_data(event);
@@ -404,10 +128,29 @@
         struct trace_array *tr = graph_array;
         struct trace_array_cpu *data;
         unsigned long flags;
+        unsigned int trace_ctx;
         long disabled;
         int ret;
         int cpu;
-        int pc;
+
+        if (trace_recursion_test(TRACE_GRAPH_NOTRACE_BIT))
+                return 0;
+
+        /*
+         * Do not trace a function if it's filtered by set_graph_notrace.
+         * Make the index of ret stack negative to indicate that it should
+         * ignore further functions. But it needs its own ret stack entry
+         * to recover the original index in order to continue tracing after
+         * returning from the function.
+         */
+        if (ftrace_graph_notrace_addr(trace->func)) {
+                trace_recursion_set(TRACE_GRAPH_NOTRACE_BIT);
+                /*
+                 * Need to return 1 to have the return called
+                 * that will clear the NOTRACE bit.
+                 */
+                return 1;
+        }
 
         if (!ftrace_trace_task(tr))
                 return 0;
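The hunk above replaces the old negative-index trick for set_graph_notrace with a per-task recursion bit: when the entry handler sees a filtered function it sets TRACE_GRAPH_NOTRACE_BIT and still returns 1, so the return handler runs and clears the bit instead of logging an event. Condensed from the lines added in this diff (surrounding tracer plumbing elided), the entry/return handshake looks like this:

        /* entry handler: mark the task but keep the return hook armed */
        if (ftrace_graph_notrace_addr(trace->func)) {
                trace_recursion_set(TRACE_GRAPH_NOTRACE_BIT);
                return 1;       /* the return handler must run to clear the bit */
        }

        /* return handler: consume the mark and skip the exit event */
        if (trace_recursion_test(TRACE_GRAPH_NOTRACE_BIT)) {
                trace_recursion_clear(TRACE_GRAPH_NOTRACE_BIT);
                return;
        }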
@@ -419,16 +162,6 @@
                 return 0;
 
         /*
-         * Do not trace a function if it's filtered by set_graph_notrace.
-         * Make the index of ret stack negative to indicate that it should
-         * ignore further functions. But it needs its own ret stack entry
-         * to recover the original index in order to continue tracing after
-         * returning from the function.
-         */
-        if (ftrace_graph_notrace_addr(trace->func))
-                return 1;
-
-        /*
          * Stop here if tracing_threshold is set. We only write function return
          * events to the ring buffer.
          */
@@ -437,11 +170,11 @@
 
         local_irq_save(flags);
         cpu = raw_smp_processor_id();
-        data = per_cpu_ptr(tr->trace_buffer.data, cpu);
+        data = per_cpu_ptr(tr->array_buffer.data, cpu);
         disabled = atomic_inc_return(&data->disabled);
         if (likely(disabled == 1)) {
-                pc = preempt_count();
-                ret = __trace_graph_entry(tr, trace, flags, pc);
+                trace_ctx = tracing_gen_ctx_flags(flags);
+                ret = __trace_graph_entry(tr, trace, trace_ctx);
         } else {
                 ret = 0;
         }
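This hunk is one instance of the flags/preempt-count consolidation that runs through the whole diff: instead of passing the saved irqflags plus a separately sampled preempt_count() down to the event helpers, the caller packs both into a single trace_ctx word with tracing_gen_ctx_flags() and passes only that. Side by side, using the lines from this hunk:

        /* old convention: two values threaded through every helper */
        pc = preempt_count();
        ret = __trace_graph_entry(tr, trace, flags, pc);

        /* new convention: one packed context word derived from the saved flags */
        trace_ctx = tracing_gen_ctx_flags(flags);
        ret = __trace_graph_entry(tr, trace, trace_ctx);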
@@ -454,7 +187,7 @@
 
 static void
 __trace_graph_function(struct trace_array *tr,
-                unsigned long ip, unsigned long flags, int pc)
+                unsigned long ip, unsigned int trace_ctx)
 {
         u64 time = trace_clock_local();
         struct ftrace_graph_ent ent = {
@@ -468,30 +201,29 @@
                 .rettime = time,
         };
 
-        __trace_graph_entry(tr, &ent, flags, pc);
-        __trace_graph_return(tr, &ret, flags, pc);
+        __trace_graph_entry(tr, &ent, trace_ctx);
+        __trace_graph_return(tr, &ret, trace_ctx);
 }
 
 void
 trace_graph_function(struct trace_array *tr,
                 unsigned long ip, unsigned long parent_ip,
-                unsigned long flags, int pc)
+                unsigned int trace_ctx)
 {
-        __trace_graph_function(tr, ip, flags, pc);
+        __trace_graph_function(tr, ip, trace_ctx);
 }
 
 void __trace_graph_return(struct trace_array *tr,
                         struct ftrace_graph_ret *trace,
-                        unsigned long flags,
-                        int pc)
+                        unsigned int trace_ctx)
 {
         struct trace_event_call *call = &event_funcgraph_exit;
         struct ring_buffer_event *event;
-        struct ring_buffer *buffer = tr->trace_buffer.buffer;
+        struct trace_buffer *buffer = tr->array_buffer.buffer;
         struct ftrace_graph_ret_entry *entry;
 
         event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
-                                          sizeof(*entry), flags, pc);
+                                          sizeof(*entry), trace_ctx);
         if (!event)
                 return;
         entry = ring_buffer_event_data(event);
@@ -505,19 +237,24 @@
         struct trace_array *tr = graph_array;
         struct trace_array_cpu *data;
         unsigned long flags;
+        unsigned int trace_ctx;
         long disabled;
         int cpu;
-        int pc;
 
         ftrace_graph_addr_finish(trace);
 
+        if (trace_recursion_test(TRACE_GRAPH_NOTRACE_BIT)) {
+                trace_recursion_clear(TRACE_GRAPH_NOTRACE_BIT);
+                return;
+        }
+
         local_irq_save(flags);
         cpu = raw_smp_processor_id();
-        data = per_cpu_ptr(tr->trace_buffer.data, cpu);
+        data = per_cpu_ptr(tr->array_buffer.data, cpu);
         disabled = atomic_inc_return(&data->disabled);
         if (likely(disabled == 1)) {
-                pc = preempt_count();
-                __trace_graph_return(tr, trace, flags, pc);
+                trace_ctx = tracing_gen_ctx_flags(flags);
+                __trace_graph_return(tr, trace, trace_ctx);
         }
         atomic_dec(&data->disabled);
         local_irq_restore(flags);
@@ -536,6 +273,11 @@
 {
         ftrace_graph_addr_finish(trace);
 
+        if (trace_recursion_test(TRACE_GRAPH_NOTRACE_BIT)) {
+                trace_recursion_clear(TRACE_GRAPH_NOTRACE_BIT);
+                return;
+        }
+
         if (tracing_thresh &&
             (trace->rettime - trace->calltime < tracing_thresh))
                 return;
@@ -543,17 +285,25 @@
                 trace_graph_return(trace);
 }
 
+static struct fgraph_ops funcgraph_thresh_ops = {
+        .entryfunc = &trace_graph_entry,
+        .retfunc = &trace_graph_thresh_return,
+};
+
+static struct fgraph_ops funcgraph_ops = {
+        .entryfunc = &trace_graph_entry,
+        .retfunc = &trace_graph_return,
+};
+
 static int graph_trace_init(struct trace_array *tr)
 {
         int ret;
 
         set_graph_array(tr);
         if (tracing_thresh)
-                ret = register_ftrace_graph(&trace_graph_thresh_return,
-                                            &trace_graph_entry);
+                ret = register_ftrace_graph(&funcgraph_thresh_ops);
         else
-                ret = register_ftrace_graph(&trace_graph_return,
-                                            &trace_graph_entry);
+                ret = register_ftrace_graph(&funcgraph_ops);
         if (ret)
                 return ret;
         tracing_start_cmdline_record();
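The registration change above follows the ops-based fgraph API: the entry and return callbacks are bundled into a struct fgraph_ops, and the same object is handed to register_ftrace_graph() and later to unregister_ftrace_graph(), which is what lets the threshold and normal variants coexist. A minimal sketch of the pairing, reusing the callback names from this file (the ops and function names here are illustrative, not part of the patch):

        static struct fgraph_ops example_fgraph_ops = {
                .entryfunc = &trace_graph_entry,
                .retfunc   = &trace_graph_return,
        };

        static int example_tracer_init(struct trace_array *tr)
        {
                return register_ftrace_graph(&example_fgraph_ops);
        }

        static void example_tracer_reset(struct trace_array *tr)
        {
                unregister_ftrace_graph(&example_fgraph_ops);
        }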
@@ -564,7 +314,10 @@
 static void graph_trace_reset(struct trace_array *tr)
 {
         tracing_stop_cmdline_record();
-        unregister_ftrace_graph();
+        if (tracing_thresh)
+                unregister_ftrace_graph(&funcgraph_thresh_ops);
+        else
+                unregister_ftrace_graph(&funcgraph_ops);
 }
 
 static int graph_trace_update_thresh(struct trace_array *tr)
@@ -622,6 +375,7 @@
 {
         trace_seq_putc(s, ' ');
         trace_print_lat_fmt(s, entry);
+        trace_seq_puts(s, " | ");
 }
 
 /* If the pid changed since the last trace, output this event */
@@ -688,9 +442,9 @@
                  * We need to consume the current entry to see
                  * the next one.
                  */
-                ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu,
+                ring_buffer_consume(iter->array_buffer->buffer, iter->cpu,
                                     NULL, NULL);
-                event = ring_buffer_peek(iter->trace_buffer->buffer, iter->cpu,
+                event = ring_buffer_peek(iter->array_buffer->buffer, iter->cpu,
                                          NULL, NULL);
         }
 
@@ -726,7 +480,7 @@
 
         /* this is a leaf, now advance the iterator */
         if (ring_iter)
-                ring_buffer_read(ring_iter, NULL);
+                ring_buffer_iter_advance(ring_iter);
 
         return next;
 }
@@ -740,6 +494,17 @@
 
         trace_seq_printf(s, "%5lu.%06lu | ",
                          (unsigned long)t, usecs_rem);
+}
+
+static void
+print_graph_rel_time(struct trace_iterator *iter, struct trace_seq *s)
+{
+        unsigned long long usecs;
+
+        usecs = iter->ts - iter->array_buffer->time_start;
+        do_div(usecs, NSEC_PER_USEC);
+
+        trace_seq_printf(s, "%9llu us | ", usecs);
 }
 
 static void
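print_graph_rel_time(), added above, derives the new REL TIME column by subtracting the buffer's time_start from the event timestamp and scaling nanoseconds to microseconds with do_div(). One point worth spelling out: do_div() divides its 64-bit first argument in place and evaluates to the remainder, so after the call usecs already holds the quotient. A small worked illustration with hypothetical values:

        unsigned long long delta = 1234567;     /* ns since time_start (hypothetical) */
        unsigned long rem;

        rem = do_div(delta, NSEC_PER_USEC);     /* delta becomes 1234 (us), rem is 567 (ns) */
        trace_seq_printf(s, "%9llu us | ", delta);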
@@ -758,6 +523,10 @@
         /* Absolute time */
         if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
                 print_graph_abs_time(iter->ts, s);
+
+        /* Relative time */
+        if (flags & TRACE_GRAPH_PRINT_REL_TIME)
+                print_graph_rel_time(iter, s);
 
         /* Cpu */
         if (flags & TRACE_GRAPH_PRINT_CPU)
@@ -874,10 +643,6 @@
 
         cpu_data = per_cpu_ptr(data->cpu_data, cpu);
 
-        /* If a graph tracer ignored set_graph_notrace */
-        if (call->depth < -1)
-                call->depth += FTRACE_NOTRACE_DEPTH;
-
         /*
          * Comments display at + 1 to depth. Since
          * this is a leaf function, keep the comments
@@ -919,10 +684,6 @@
         if (data) {
                 struct fgraph_cpu_data *cpu_data;
                 int cpu = iter->cpu;
-
-                /* If a graph tracer ignored set_graph_notrace */
-                if (call->depth < -1)
-                        call->depth += FTRACE_NOTRACE_DEPTH;
 
                 cpu_data = per_cpu_ptr(data->cpu_data, cpu);
                 cpu_data->depth = call->depth;
@@ -974,6 +735,10 @@
         /* Absolute time */
         if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
                 print_graph_abs_time(iter->ts, s);
+
+        /* Relative time */
+        if (flags & TRACE_GRAPH_PRINT_REL_TIME)
+                print_graph_rel_time(iter, s);
 
         /* Cpu */
         if (flags & TRACE_GRAPH_PRINT_CPU)
@@ -1351,6 +1116,8 @@
 
         if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
                 size += 16;
+        if (flags & TRACE_GRAPH_PRINT_REL_TIME)
+                size += 16;
         if (flags & TRACE_GRAPH_PRINT_CPU)
                 size += 4;
         if (flags & TRACE_GRAPH_PRINT_PROC)
@@ -1375,12 +1142,14 @@
         seq_putc(s, '#');
         if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
                 seq_puts(s, " TIME ");
+        if (flags & TRACE_GRAPH_PRINT_REL_TIME)
+                seq_puts(s, " REL TIME ");
         if (flags & TRACE_GRAPH_PRINT_CPU)
                 seq_puts(s, " CPU");
         if (flags & TRACE_GRAPH_PRINT_PROC)
                 seq_puts(s, " TASK/PID ");
         if (lat)
-                seq_puts(s, "||||");
+                seq_puts(s, "|||| ");
         if (flags & TRACE_GRAPH_PRINT_DURATION)
                 seq_puts(s, " DURATION ");
         seq_puts(s, " FUNCTION CALLS\n");
@@ -1389,12 +1158,14 @@
         seq_putc(s, '#');
         if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
                 seq_puts(s, " | ");
+        if (flags & TRACE_GRAPH_PRINT_REL_TIME)
+                seq_puts(s, " | ");
         if (flags & TRACE_GRAPH_PRINT_CPU)
                 seq_puts(s, " | ");
         if (flags & TRACE_GRAPH_PRINT_PROC)
                 seq_puts(s, " | | ");
         if (lat)
-                seq_puts(s, "||||");
+                seq_puts(s, "|||| ");
         if (flags & TRACE_GRAPH_PRINT_DURATION)
                 seq_puts(s, " | | ");
         seq_puts(s, " | | | |\n");
@@ -1563,13 +1334,13 @@
 
 static __init int init_graph_tracefs(void)
 {
-        struct dentry *d_tracer;
+        int ret;
 
-        d_tracer = tracing_init_dentry();
-        if (IS_ERR(d_tracer))
+        ret = tracing_init_dentry();
+        if (ret)
                 return 0;
 
-        trace_create_file("max_graph_depth", 0644, d_tracer,
+        trace_create_file("max_graph_depth", 0644, NULL,
                           NULL, &graph_depth_fops);
 
         return 0;