@@ -14,6 +14,7 @@
 #include <linux/uaccess.h>
 #include <linux/module.h>
 #include <linux/ftrace.h>
+#include <linux/kprobes.h>
 
 #include "trace.h"
 
@@ -121,7 +122,7 @@
 	if (!irqs_disabled_flags(*flags) && !preempt_count())
 		return 0;
 
-	*data = per_cpu_ptr(tr->trace_buffer.data, cpu);
+	*data = per_cpu_ptr(tr->array_buffer.data, cpu);
 	disabled = atomic_inc_return(&(*data)->disabled);
 
 	if (likely(disabled == 1))
@@ -142,11 +143,14 @@
 	struct trace_array *tr = irqsoff_trace;
 	struct trace_array_cpu *data;
 	unsigned long flags;
+	unsigned int trace_ctx;
 
 	if (!func_prolog_dec(tr, &data, &flags))
 		return;
 
-	trace_function(tr, ip, parent_ip, flags, preempt_count());
+	trace_ctx = tracing_gen_ctx_flags(flags);
+
+	trace_function(tr, ip, parent_ip, trace_ctx);
 
 	atomic_dec(&data->disabled);
 }
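
Here the old (flags, pc) argument pair is folded into a single packed trace_ctx word. As a rough sketch of the idea behind tracing_gen_ctx_flags() (illustrative only; the real helper lives in kernel/trace/trace.h, and the exact bit layout below is an assumption):

	/* Illustrative sketch: pack the irq/preempt state that used to
	 * travel as separate 'flags' and 'pc' arguments into one word. */
	static inline unsigned int example_gen_ctx_flags(unsigned long irqflags)
	{
		unsigned int trace_flags = 0;
		unsigned int pc = preempt_count();

		if (irqs_disabled_flags(irqflags))
			trace_flags |= TRACE_FLAG_IRQS_OFF;
		if (pc & HARDIRQ_MASK)
			trace_flags |= TRACE_FLAG_HARDIRQ;

		/* flag bits in the high half, preempt depth in the low byte */
		return (trace_flags << 16) | (pc & 0xff);
	}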
@@ -166,7 +170,7 @@
 	per_cpu(tracing_cpu, cpu) = 0;
 
 	tr->max_latency = 0;
-	tracing_reset_online_cpus(&irqsoff_trace->trace_buffer);
+	tracing_reset_online_cpus(&irqsoff_trace->array_buffer);
 
 	return start_irqsoff_tracer(irqsoff_trace, set);
 }
@@ -176,8 +180,8 @@
 	struct trace_array *tr = irqsoff_trace;
 	struct trace_array_cpu *data;
 	unsigned long flags;
+	unsigned int trace_ctx;
 	int ret;
-	int pc;
 
 	if (ftrace_graph_ignore_func(trace))
 		return 0;
@@ -194,8 +198,8 @@
 	if (!func_prolog_dec(tr, &data, &flags))
 		return 0;
 
-	pc = preempt_count();
-	ret = __trace_graph_entry(tr, trace, flags, pc);
+	trace_ctx = tracing_gen_ctx_flags(flags);
+	ret = __trace_graph_entry(tr, trace, trace_ctx);
 	atomic_dec(&data->disabled);
 
 	return ret;
@@ -206,17 +210,22 @@
 	struct trace_array *tr = irqsoff_trace;
 	struct trace_array_cpu *data;
 	unsigned long flags;
-	int pc;
+	unsigned int trace_ctx;
 
 	ftrace_graph_addr_finish(trace);
 
 	if (!func_prolog_dec(tr, &data, &flags))
 		return;
 
-	pc = preempt_count();
-	__trace_graph_return(tr, trace, flags, pc);
+	trace_ctx = tracing_gen_ctx_flags(flags);
+	__trace_graph_return(tr, trace, trace_ctx);
 	atomic_dec(&data->disabled);
 }
+
+static struct fgraph_ops fgraph_ops = {
+	.entryfunc = &irqsoff_graph_entry,
+	.retfunc = &irqsoff_graph_return,
+};
 
 static void irqsoff_trace_open(struct trace_iterator *iter)
 {
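
The fgraph_ops structure added above is what lets the registration sites later in this diff pass a single ops pointer. Both call shapes appear verbatim in the hunks below; side by side:

	/* Old API: entry and return handlers were passed separately. */
	ret = register_ftrace_graph(&irqsoff_graph_return,
				    &irqsoff_graph_entry);

	/* New API: one bundle; the same pointer is used on teardown. */
	ret = register_ftrace_graph(&fgraph_ops);
	unregister_ftrace_graph(&fgraph_ops);

A visible consequence: since nothing references the callbacks individually any more, the dummy irqsoff_graph_entry()/irqsoff_graph_return() stubs in the non-graph branch lose their last users and are deleted below.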
@@ -233,7 +242,7 @@
 
 #define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_CPU | \
 			    TRACE_GRAPH_PRINT_PROC | \
-			    TRACE_GRAPH_PRINT_ABS_TIME | \
+			    TRACE_GRAPH_PRINT_REL_TIME | \
 			    TRACE_GRAPH_PRINT_DURATION)
 
 static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
@@ -261,23 +270,16 @@
 static void
 __trace_function(struct trace_array *tr,
 		 unsigned long ip, unsigned long parent_ip,
-		 unsigned long flags, int pc)
+		 unsigned int trace_ctx)
 {
 	if (is_graph(tr))
-		trace_graph_function(tr, ip, parent_ip, flags, pc);
+		trace_graph_function(tr, ip, parent_ip, trace_ctx);
 	else
-		trace_function(tr, ip, parent_ip, flags, pc);
+		trace_function(tr, ip, parent_ip, trace_ctx);
 }
 
 #else
 #define __trace_function trace_function
-
-#ifdef CONFIG_FUNCTION_TRACER
-static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
-{
-	return -1;
-}
-#endif
 
 static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
 {
@@ -288,7 +290,6 @@
 static void irqsoff_trace_close(struct trace_iterator *iter) { }
 
 #ifdef CONFIG_FUNCTION_TRACER
-static void irqsoff_graph_return(struct ftrace_graph_ret *trace) { }
 static void irqsoff_print_header(struct seq_file *s)
 {
 	trace_default_header(s);
@@ -324,15 +325,13 @@
 {
 	u64 T0, T1, delta;
 	unsigned long flags;
-	int pc;
+	unsigned int trace_ctx;
 
 	T0 = data->preempt_timestamp;
 	T1 = ftrace_now(cpu);
 	delta = T1-T0;
 
-	local_save_flags(flags);
-
-	pc = preempt_count();
+	trace_ctx = tracing_gen_ctx();
 
 	if (!report_latency(tr, delta))
 		goto out;
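
Note the no-argument variant used here: tracing_gen_ctx() samples the current irq state itself, standing in for the deleted local_save_flags() + preempt_count() pair. A minimal sketch of the relationship, reusing the illustrative helper from earlier (an assumption, not the in-tree definition):

	/* Illustrative: the no-argument form saves flags internally. */
	static inline unsigned int example_gen_ctx(void)
	{
		unsigned long irqflags;

		local_save_flags(irqflags);
		return example_gen_ctx_flags(irqflags);
	}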
@@ -343,9 +342,9 @@
 	if (!report_latency(tr, delta))
 		goto out_unlock;
 
-	__trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
+	__trace_function(tr, CALLER_ADDR0, parent_ip, trace_ctx);
 	/* Skip 5 functions to get to the irq/preempt enable function */
-	__trace_stack(tr, flags, 5, pc);
+	__trace_stack(tr, trace_ctx, 5);
 
 	if (data->critical_sequence != max_sequence)
 		goto out_unlock;
@@ -365,16 +364,15 @@
 out:
 	data->critical_sequence = max_sequence;
 	data->preempt_timestamp = ftrace_now(cpu);
-	__trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
+	__trace_function(tr, CALLER_ADDR0, parent_ip, trace_ctx);
 }
 
-static inline void
-start_critical_timing(unsigned long ip, unsigned long parent_ip, int pc)
+static nokprobe_inline void
+start_critical_timing(unsigned long ip, unsigned long parent_ip)
 {
 	int cpu;
 	struct trace_array *tr = irqsoff_trace;
 	struct trace_array_cpu *data;
-	unsigned long flags;
 
 	if (!tracer_enabled || !tracing_is_enabled())
 		return;
@@ -384,7 +382,7 @@
 	if (per_cpu(tracing_cpu, cpu))
 		return;
 
-	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
+	data = per_cpu_ptr(tr->array_buffer.data, cpu);
 
 	if (unlikely(!data) || atomic_read(&data->disabled))
 		return;
@@ -395,22 +393,20 @@
 	data->preempt_timestamp = ftrace_now(cpu);
 	data->critical_start = parent_ip ? : ip;
 
-	local_save_flags(flags);
-
-	__trace_function(tr, ip, parent_ip, flags, pc);
+	__trace_function(tr, ip, parent_ip, tracing_gen_ctx());
 
 	per_cpu(tracing_cpu, cpu) = 1;
 
 	atomic_dec(&data->disabled);
 }
 
-static inline void
-stop_critical_timing(unsigned long ip, unsigned long parent_ip, int pc)
+static nokprobe_inline void
+stop_critical_timing(unsigned long ip, unsigned long parent_ip)
 {
 	int cpu;
 	struct trace_array *tr = irqsoff_trace;
 	struct trace_array_cpu *data;
-	unsigned long flags;
+	unsigned int trace_ctx;
 
 	cpu = raw_smp_processor_id();
 	/* Always clear the tracing cpu on stopping the trace */
@@ -422,7 +418,7 @@
 	if (!tracer_enabled || !tracing_is_enabled())
 		return;
 
-	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
+	data = per_cpu_ptr(tr->array_buffer.data, cpu);
 
 	if (unlikely(!data) ||
 	    !data->critical_start || atomic_read(&data->disabled))
@@ -430,8 +426,8 @@
 
 	atomic_inc(&data->disabled);
 
-	local_save_flags(flags);
-	__trace_function(tr, ip, parent_ip, flags, pc);
+	trace_ctx = tracing_gen_ctx();
+	__trace_function(tr, ip, parent_ip, trace_ctx);
 	check_critical_timing(tr, data, parent_ip ? : ip, cpu);
 	data->critical_start = 0;
 	atomic_dec(&data->disabled);
@@ -440,21 +436,19 @@
 /* start and stop critical timings used to for stoppage (in idle) */
 void start_critical_timings(void)
 {
-	int pc = preempt_count();
-
-	if (preempt_trace(pc) || irq_trace())
-		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1, pc);
+	if (preempt_trace(preempt_count()) || irq_trace())
+		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
 }
 EXPORT_SYMBOL_GPL(start_critical_timings);
+NOKPROBE_SYMBOL(start_critical_timings);
 
 void stop_critical_timings(void)
 {
-	int pc = preempt_count();
-
-	if (preempt_trace(pc) || irq_trace())
-		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1, pc);
+	if (preempt_trace(preempt_count()) || irq_trace())
+		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
 }
 EXPORT_SYMBOL_GPL(stop_critical_timings);
+NOKPROBE_SYMBOL(stop_critical_timings);
 
 #ifdef CONFIG_FUNCTION_TRACER
 static bool function_enabled;
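
The nokprobe_inline and NOKPROBE_SYMBOL() annotations keep kprobes out of these timing paths, which kprobes itself can end up invoking while handling a probe; a probe planted here would recurse. A minimal sketch of the pattern (hypothetical function names; nokprobe_inline and NOKPROBE_SYMBOL() are the real <linux/kprobes.h> API, which is why the include was added at the top of this diff):

	#include <linux/kprobes.h>

	/* Force-inlined into callers, so no probeable symbol exists. */
	static nokprobe_inline void timing_helper(void)
	{
		/* ... critical-section bookkeeping ... */
	}

	void timing_entry_point(void)
	{
		timing_helper();
	}
	/* The symbol stays visible but is blacklisted for kprobes. */
	NOKPROBE_SYMBOL(timing_entry_point);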
@@ -468,8 +462,7 @@
 		return 0;
 
 	if (graph)
-		ret = register_ftrace_graph(&irqsoff_graph_return,
-					    &irqsoff_graph_entry);
+		ret = register_ftrace_graph(&fgraph_ops);
 	else
 		ret = register_ftrace_function(tr->ops);
 
@@ -485,7 +478,7 @@
 		return;
 
 	if (graph)
-		unregister_ftrace_graph();
+		unregister_ftrace_graph(&fgraph_ops);
 	else
 		unregister_ftrace_function(tr->ops);
 
@@ -563,6 +556,8 @@
 	/* non overwrite screws up the latency tracers */
 	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
 	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1);
+	/* without pause, we will produce garbage if another latency occurs */
+	set_tracer_flag(tr, TRACE_ITER_PAUSE_ON_TRACE, 1);
 
 	tr->max_latency = 0;
 	irqsoff_trace = tr;
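
TRACE_ITER_PAUSE_ON_TRACE makes a read of the trace file pause recording, so the max-latency trace just captured cannot be overwritten by a new latency while a user is reading it; the comment in the hunk above says as much. Roughly what the core open path does when the flag is set (a condensed, non-authoritative sketch):

	/* Conceptually, in the trace-file open path: */
	if (tr->trace_flags & TRACE_ITER_PAUSE_ON_TRACE)
		tracing_stop_tr(tr);	/* recording resumes on release */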
@@ -584,11 +579,13 @@
 {
 	int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
 	int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;
+	int pause_flag = save_flags & TRACE_ITER_PAUSE_ON_TRACE;
 
 	stop_irqsoff_tracer(tr, is_graph(tr));
 
 	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
 	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
+	set_tracer_flag(tr, TRACE_ITER_PAUSE_ON_TRACE, pause_flag);
 	ftrace_reset_array_ops(tr);
 
 	irqsoff_busy = false;
@@ -610,19 +607,17 @@
  */
 void tracer_hardirqs_on(unsigned long a0, unsigned long a1)
 {
-	unsigned int pc = preempt_count();
-
-	if (!preempt_trace(pc) && irq_trace())
-		stop_critical_timing(a0, a1, pc);
+	if (!preempt_trace(preempt_count()) && irq_trace())
+		stop_critical_timing(a0, a1);
 }
+NOKPROBE_SYMBOL(tracer_hardirqs_on);
 
 void tracer_hardirqs_off(unsigned long a0, unsigned long a1)
 {
-	unsigned int pc = preempt_count();
-
-	if (!preempt_trace(pc) && irq_trace())
-		start_critical_timing(a0, a1, pc);
+	if (!preempt_trace(preempt_count()) && irq_trace())
+		start_critical_timing(a0, a1);
 }
+NOKPROBE_SYMBOL(tracer_hardirqs_off);
 
 static int irqsoff_tracer_init(struct trace_array *tr)
 {
@@ -660,18 +655,14 @@
 #ifdef CONFIG_PREEMPT_TRACER
 void tracer_preempt_on(unsigned long a0, unsigned long a1)
 {
-	int pc = preempt_count();
-
-	if (preempt_trace(pc) && !irq_trace())
-		stop_critical_timing(a0, a1, pc);
+	if (preempt_trace(preempt_count()) && !irq_trace())
+		stop_critical_timing(a0, a1);
 }
 
 void tracer_preempt_off(unsigned long a0, unsigned long a1)
 {
-	int pc = preempt_count();
-
-	if (preempt_trace(pc) && !irq_trace())
-		start_critical_timing(a0, a1, pc);
+	if (preempt_trace(preempt_count()) && !irq_trace())
+		start_critical_timing(a0, a1);
 }
 
 static int preemptoff_tracer_init(struct trace_array *tr)