hc
2023-12-11 6778948f9de86c3cfaf36725a7c87dcff9ba247f
kernel/kernel/trace/trace_irqsoff.c
@@ -143,14 +143,11 @@
 	struct trace_array *tr = irqsoff_trace;
 	struct trace_array_cpu *data;
 	unsigned long flags;
-	unsigned int trace_ctx;
 
 	if (!func_prolog_dec(tr, &data, &flags))
 		return;
 
-	trace_ctx = tracing_gen_ctx_flags(flags);
-
-	trace_function(tr, ip, parent_ip, trace_ctx);
+	trace_function(tr, ip, parent_ip, flags, preempt_count());
 
 	atomic_dec(&data->disabled);
 }
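
The hunk above captures the core of this diff: the newer upstream call sites build a single packed context word with tracing_gen_ctx_flags(flags), while this tree's side keeps passing the raw irq flags plus preempt_count() as two separate arguments. Below is a minimal userspace sketch of the packing idea; the bit layout and the names gen_ctx and CTX_IRQS_OFF are illustrative assumptions, not the kernel's actual tracing_gen_ctx() implementation.

/*
 * Illustrative mockup only: fold irq state and a preempt count into one
 * word so call sites can pass a single "trace_ctx" instead of (flags, pc).
 * The bit layout here is assumed for demonstration.
 */
#include <stdio.h>

#define CTX_IRQS_OFF	(1u << 16)	/* assumed status bit */

static unsigned int gen_ctx(int irqs_off, unsigned int pc)
{
	unsigned int ctx = pc & 0xff;	/* low byte: preempt count */

	if (irqs_off)
		ctx |= CTX_IRQS_OFF;	/* upper bits: irq state */
	return ctx;
}

int main(void)
{
	unsigned int ctx = gen_ctx(1, 3);

	printf("pc=%u irqs_off=%d\n", ctx & 0xff, !!(ctx & CTX_IRQS_OFF));
	return 0;
}
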
@@ -180,8 +177,8 @@
 	struct trace_array *tr = irqsoff_trace;
 	struct trace_array_cpu *data;
 	unsigned long flags;
-	unsigned int trace_ctx;
 	int ret;
+	int pc;
 
 	if (ftrace_graph_ignore_func(trace))
 		return 0;
@@ -198,8 +195,8 @@
 	if (!func_prolog_dec(tr, &data, &flags))
 		return 0;
 
-	trace_ctx = tracing_gen_ctx_flags(flags);
-	ret = __trace_graph_entry(tr, trace, trace_ctx);
+	pc = preempt_count();
+	ret = __trace_graph_entry(tr, trace, flags, pc);
 	atomic_dec(&data->disabled);
 
 	return ret;
@@ -210,15 +207,15 @@
 	struct trace_array *tr = irqsoff_trace;
 	struct trace_array_cpu *data;
 	unsigned long flags;
-	unsigned int trace_ctx;
+	int pc;
 
 	ftrace_graph_addr_finish(trace);
 
 	if (!func_prolog_dec(tr, &data, &flags))
 		return;
 
-	trace_ctx = tracing_gen_ctx_flags(flags);
-	__trace_graph_return(tr, trace, trace_ctx);
+	pc = preempt_count();
+	__trace_graph_return(tr, trace, flags, pc);
 	atomic_dec(&data->disabled);
 }
 
@@ -270,12 +267,12 @@
 static void
 __trace_function(struct trace_array *tr,
 		 unsigned long ip, unsigned long parent_ip,
-		 unsigned int trace_ctx)
+		 unsigned long flags, int pc)
 {
 	if (is_graph(tr))
-		trace_graph_function(tr, ip, parent_ip, trace_ctx);
+		trace_graph_function(tr, ip, parent_ip, flags, pc);
 	else
-		trace_function(tr, ip, parent_ip, trace_ctx);
+		trace_function(tr, ip, parent_ip, flags, pc);
 }
 
 #else
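
For reference, __trace_function() as changed above is only a dispatcher: it forwards the same arguments either to the graph tracer or to the plain function tracer depending on is_graph(tr). A rough self-contained analogue follows; struct fake_trace_array, trace_dispatch and the backend names are invented for illustration and are not kernel APIs.

/* Standalone sketch of a "dispatch on per-instance mode" wrapper. */
#include <stdbool.h>
#include <stdio.h>

struct fake_trace_array {
	bool graph_mode;	/* stand-in for is_graph(tr) */
};

static void plain_backend(unsigned long ip, unsigned long parent_ip)
{
	printf("function entry: ip=%#lx parent=%#lx\n", ip, parent_ip);
}

static void graph_backend(unsigned long ip, unsigned long parent_ip)
{
	printf("graph entry:    ip=%#lx parent=%#lx\n", ip, parent_ip);
}

static void trace_dispatch(struct fake_trace_array *tr,
			   unsigned long ip, unsigned long parent_ip)
{
	if (tr->graph_mode)
		graph_backend(ip, parent_ip);
	else
		plain_backend(ip, parent_ip);
}

int main(void)
{
	struct fake_trace_array tr = { .graph_mode = false };

	trace_dispatch(&tr, 0x1000, 0x2000);
	return 0;
}
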
@@ -325,13 +322,15 @@
 {
 	u64 T0, T1, delta;
 	unsigned long flags;
-	unsigned int trace_ctx;
+	int pc;
 
 	T0 = data->preempt_timestamp;
 	T1 = ftrace_now(cpu);
 	delta = T1-T0;
 
-	trace_ctx = tracing_gen_ctx();
+	local_save_flags(flags);
+
+	pc = preempt_count();
 
 	if (!report_latency(tr, delta))
 		goto out;
@@ -342,9 +341,9 @@
 	if (!report_latency(tr, delta))
 		goto out_unlock;
 
-	__trace_function(tr, CALLER_ADDR0, parent_ip, trace_ctx);
+	__trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
 	/* Skip 5 functions to get to the irq/preempt enable function */
-	__trace_stack(tr, trace_ctx, 5);
+	__trace_stack(tr, flags, 5, pc);
 
 	if (data->critical_sequence != max_sequence)
 		goto out_unlock;
@@ -364,15 +363,16 @@
 out:
 	data->critical_sequence = max_sequence;
 	data->preempt_timestamp = ftrace_now(cpu);
-	__trace_function(tr, CALLER_ADDR0, parent_ip, trace_ctx);
+	__trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
 }
 
 static nokprobe_inline void
-start_critical_timing(unsigned long ip, unsigned long parent_ip)
+start_critical_timing(unsigned long ip, unsigned long parent_ip, int pc)
 {
 	int cpu;
 	struct trace_array *tr = irqsoff_trace;
 	struct trace_array_cpu *data;
+	unsigned long flags;
 
 	if (!tracer_enabled || !tracing_is_enabled())
 		return;
@@ -393,7 +393,9 @@
 	data->preempt_timestamp = ftrace_now(cpu);
 	data->critical_start = parent_ip ? : ip;
 
-	__trace_function(tr, ip, parent_ip, tracing_gen_ctx());
+	local_save_flags(flags);
+
+	__trace_function(tr, ip, parent_ip, flags, pc);
 
 	per_cpu(tracing_cpu, cpu) = 1;
 
@@ -401,12 +403,12 @@
 }
 
 static nokprobe_inline void
-stop_critical_timing(unsigned long ip, unsigned long parent_ip)
+stop_critical_timing(unsigned long ip, unsigned long parent_ip, int pc)
 {
 	int cpu;
 	struct trace_array *tr = irqsoff_trace;
 	struct trace_array_cpu *data;
-	unsigned int trace_ctx;
+	unsigned long flags;
 
 	cpu = raw_smp_processor_id();
 	/* Always clear the tracing cpu on stopping the trace */
@@ -426,8 +428,8 @@
 
 	atomic_inc(&data->disabled);
 
-	trace_ctx = tracing_gen_ctx();
-	__trace_function(tr, ip, parent_ip, trace_ctx);
+	local_save_flags(flags);
+	__trace_function(tr, ip, parent_ip, flags, pc);
 	check_critical_timing(tr, data, parent_ip ? : ip, cpu);
 	data->critical_start = 0;
 	atomic_dec(&data->disabled);
@@ -436,16 +438,20 @@
 /* start and stop critical timings used to for stoppage (in idle) */
 void start_critical_timings(void)
 {
-	if (preempt_trace(preempt_count()) || irq_trace())
-		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
+	int pc = preempt_count();
+
+	if (preempt_trace(pc) || irq_trace())
+		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1, pc);
 }
 EXPORT_SYMBOL_GPL(start_critical_timings);
 NOKPROBE_SYMBOL(start_critical_timings);
 
 void stop_critical_timings(void)
 {
-	if (preempt_trace(preempt_count()) || irq_trace())
-		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
+	int pc = preempt_count();
+
+	if (preempt_trace(pc) || irq_trace())
+		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1, pc);
 }
 EXPORT_SYMBOL_GPL(stop_critical_timings);
 NOKPROBE_SYMBOL(stop_critical_timings);
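
The wrappers in this hunk show the pattern most of the remaining hunks repeat: preempt_count() is sampled once at the entry point and the resulting pc is handed down to start_critical_timing()/stop_critical_timing(), so the recorded value reflects the caller's context rather than whatever the count happens to be deeper in the call chain. A tiny standalone sketch of that "sample once, pass down" idea is shown below; fake_preempt_count, critical_timing_entry and record_event are invented names, not kernel interfaces.

/* Standalone sketch: sample a context value once at the boundary. */
#include <stdio.h>

static unsigned int fake_preempt_count = 2;	/* stand-in for preempt_count() */

static void record_event(const char *what, unsigned int pc)
{
	printf("%s: caller preempt count %u\n", what, pc);
}

static void critical_timing_entry(const char *what)
{
	unsigned int pc = fake_preempt_count;	/* sampled once on entry */

	fake_preempt_count++;	/* later changes don't alter what gets recorded */
	record_event(what, pc);
}

int main(void)
{
	critical_timing_entry("start");
	critical_timing_entry("stop");
	return 0;
}
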
@@ -607,15 +613,19 @@
  */
 void tracer_hardirqs_on(unsigned long a0, unsigned long a1)
 {
-	if (!preempt_trace(preempt_count()) && irq_trace())
-		stop_critical_timing(a0, a1);
+	unsigned int pc = preempt_count();
+
+	if (!preempt_trace(pc) && irq_trace())
+		stop_critical_timing(a0, a1, pc);
 }
 NOKPROBE_SYMBOL(tracer_hardirqs_on);
 
 void tracer_hardirqs_off(unsigned long a0, unsigned long a1)
 {
-	if (!preempt_trace(preempt_count()) && irq_trace())
-		start_critical_timing(a0, a1);
+	unsigned int pc = preempt_count();
+
+	if (!preempt_trace(pc) && irq_trace())
+		start_critical_timing(a0, a1, pc);
 }
 NOKPROBE_SYMBOL(tracer_hardirqs_off);
 
@@ -655,14 +665,18 @@
 #ifdef CONFIG_PREEMPT_TRACER
 void tracer_preempt_on(unsigned long a0, unsigned long a1)
 {
-	if (preempt_trace(preempt_count()) && !irq_trace())
-		stop_critical_timing(a0, a1);
+	int pc = preempt_count();
+
+	if (preempt_trace(pc) && !irq_trace())
+		stop_critical_timing(a0, a1, pc);
 }
 
 void tracer_preempt_off(unsigned long a0, unsigned long a1)
 {
-	if (preempt_trace(preempt_count()) && !irq_trace())
-		start_critical_timing(a0, a1);
+	int pc = preempt_count();
+
+	if (preempt_trace(pc) && !irq_trace())
+		start_critical_timing(a0, a1, pc);
 }
 
 static int preemptoff_tracer_init(struct trace_array *tr)