commit 102a0743326a03cd1a1202ceda21e175b7d3575c (2024-02-20)
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -14,6 +14,7 @@
 #include <linux/uaccess.h>
 #include <linux/module.h>
 #include <linux/ftrace.h>
+#include <linux/kprobes.h>
 
 #include "trace.h"
 
@@ -121,7 +122,7 @@
 	if (!irqs_disabled_flags(*flags) && !preempt_count())
 		return 0;
 
-	*data = per_cpu_ptr(tr->trace_buffer.data, cpu);
+	*data = per_cpu_ptr(tr->array_buffer.data, cpu);
 	disabled = atomic_inc_return(&(*data)->disabled);
 
 	if (likely(disabled == 1))
@@ -166,7 +167,7 @@
 		per_cpu(tracing_cpu, cpu) = 0;
 
 	tr->max_latency = 0;
-	tracing_reset_online_cpus(&irqsoff_trace->trace_buffer);
+	tracing_reset_online_cpus(&irqsoff_trace->array_buffer);
 
 	return start_irqsoff_tracer(irqsoff_trace, set);
 }
@@ -218,11 +219,17 @@
 	atomic_dec(&data->disabled);
 }
 
+static struct fgraph_ops fgraph_ops = {
+	.entryfunc = &irqsoff_graph_entry,
+	.retfunc = &irqsoff_graph_return,
+};
+
 static void irqsoff_trace_open(struct trace_iterator *iter)
 {
 	if (is_graph(iter->tr))
 		graph_trace_open(iter);
-
+	else
+		iter->private = NULL;
 }
 
 static void irqsoff_trace_close(struct trace_iterator *iter)
@@ -233,7 +240,7 @@
 
 #define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_CPU | \
 			    TRACE_GRAPH_PRINT_PROC | \
-			    TRACE_GRAPH_PRINT_ABS_TIME | \
+			    TRACE_GRAPH_PRINT_REL_TIME | \
 			    TRACE_GRAPH_PRINT_DURATION)
 
 static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
@@ -272,13 +279,6 @@
 #else
 #define __trace_function trace_function
 
-#ifdef CONFIG_FUNCTION_TRACER
-static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
-{
-	return -1;
-}
-#endif
-
 static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
 {
 	return TRACE_TYPE_UNHANDLED;
@@ -288,7 +288,6 @@
 static void irqsoff_trace_close(struct trace_iterator *iter) { }
 
 #ifdef CONFIG_FUNCTION_TRACER
-static void irqsoff_graph_return(struct ftrace_graph_ret *trace) { }
 static void irqsoff_print_header(struct seq_file *s)
 {
 	trace_default_header(s);
@@ -368,7 +367,7 @@
 	__trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
 }
 
-static inline void
+static nokprobe_inline void
 start_critical_timing(unsigned long ip, unsigned long parent_ip, int pc)
 {
 	int cpu;
@@ -384,7 +383,7 @@
 	if (per_cpu(tracing_cpu, cpu))
 		return;
 
-	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
+	data = per_cpu_ptr(tr->array_buffer.data, cpu);
 
 	if (unlikely(!data) || atomic_read(&data->disabled))
 		return;
@@ -404,7 +403,7 @@
 	atomic_dec(&data->disabled);
 }
 
-static inline void
+static nokprobe_inline void
 stop_critical_timing(unsigned long ip, unsigned long parent_ip, int pc)
 {
 	int cpu;
@@ -422,7 +421,7 @@
 	if (!tracer_enabled || !tracing_is_enabled())
 		return;
 
-	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
+	data = per_cpu_ptr(tr->array_buffer.data, cpu);
 
 	if (unlikely(!data) ||
 	    !data->critical_start || atomic_read(&data->disabled))
@@ -446,6 +445,7 @@
 	start_critical_timing(CALLER_ADDR0, CALLER_ADDR1, pc);
 }
 EXPORT_SYMBOL_GPL(start_critical_timings);
+NOKPROBE_SYMBOL(start_critical_timings);
 
 void stop_critical_timings(void)
 {
@@ -455,6 +455,7 @@
 	stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1, pc);
 }
 EXPORT_SYMBOL_GPL(stop_critical_timings);
+NOKPROBE_SYMBOL(stop_critical_timings);
 
 #ifdef CONFIG_FUNCTION_TRACER
 static bool function_enabled;
@@ -468,8 +469,7 @@
 		return 0;
 
 	if (graph)
-		ret = register_ftrace_graph(&irqsoff_graph_return,
-					    &irqsoff_graph_entry);
+		ret = register_ftrace_graph(&fgraph_ops);
 	else
 		ret = register_ftrace_function(tr->ops);
 
@@ -485,7 +485,7 @@
 		return;
 
 	if (graph)
-		unregister_ftrace_graph();
+		unregister_ftrace_graph(&fgraph_ops);
 	else
 		unregister_ftrace_function(tr->ops);
 
@@ -563,6 +563,8 @@
 	/* non overwrite screws up the latency tracers */
 	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
 	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1);
+	/* without pause, we will produce garbage if another latency occurs */
+	set_tracer_flag(tr, TRACE_ITER_PAUSE_ON_TRACE, 1);
 
 	tr->max_latency = 0;
 	irqsoff_trace = tr;
@@ -584,11 +586,13 @@
 {
 	int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
 	int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;
+	int pause_flag = save_flags & TRACE_ITER_PAUSE_ON_TRACE;
 
 	stop_irqsoff_tracer(tr, is_graph(tr));
 
 	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
 	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
+	set_tracer_flag(tr, TRACE_ITER_PAUSE_ON_TRACE, pause_flag);
 	ftrace_reset_array_ops(tr);
 
 	irqsoff_busy = false;
@@ -615,6 +619,7 @@
 	if (!preempt_trace(pc) && irq_trace())
 		stop_critical_timing(a0, a1, pc);
 }
+NOKPROBE_SYMBOL(tracer_hardirqs_on);
 
 void tracer_hardirqs_off(unsigned long a0, unsigned long a1)
 {
@@ -623,6 +628,7 @@
 	if (!preempt_trace(pc) && irq_trace())
 		start_critical_timing(a0, a1, pc);
 }
+NOKPROBE_SYMBOL(tracer_hardirqs_off);
 
 static int irqsoff_tracer_init(struct trace_array *tr)
 {