hc
2023-12-11 6778948f9de86c3cfaf36725a7c87dcff9ba247f
kernel/kernel/trace/trace_sched_wakeup.c
@@ -67,7 +67,7 @@
 static int
 func_prolog_preempt_disable(struct trace_array *tr,
			     struct trace_array_cpu **data,
-			     unsigned int *trace_ctx)
+			     int *pc)
 {
	long disabled;
	int cpu;
@@ -75,7 +75,7 @@
	if (likely(!wakeup_task))
		return 0;

-	*trace_ctx = tracing_gen_ctx();
+	*pc = preempt_count();
	preempt_disable_notrace();

	cpu = raw_smp_processor_id();
@@ -116,8 +116,8 @@
 {
	struct trace_array *tr = wakeup_trace;
	struct trace_array_cpu *data;
-	unsigned int trace_ctx;
-	int ret = 0;
+	unsigned long flags;
+	int pc, ret = 0;

	if (ftrace_graph_ignore_func(trace))
		return 0;
@@ -131,10 +131,11 @@
	if (ftrace_graph_notrace_addr(trace->func))
		return 1;

-	if (!func_prolog_preempt_disable(tr, &data, &trace_ctx))
+	if (!func_prolog_preempt_disable(tr, &data, &pc))
		return 0;

-	ret = __trace_graph_entry(tr, trace, trace_ctx);
+	local_save_flags(flags);
+	ret = __trace_graph_entry(tr, trace, flags, pc);
	atomic_dec(&data->disabled);
	preempt_enable_notrace();

@@ -145,14 +146,16 @@
 {
	struct trace_array *tr = wakeup_trace;
	struct trace_array_cpu *data;
-	unsigned int trace_ctx;
+	unsigned long flags;
+	int pc;

	ftrace_graph_addr_finish(trace);

-	if (!func_prolog_preempt_disable(tr, &data, &trace_ctx))
+	if (!func_prolog_preempt_disable(tr, &data, &pc))
		return;

-	__trace_graph_return(tr, trace, trace_ctx);
+	local_save_flags(flags);
+	__trace_graph_return(tr, trace, flags, pc);
	atomic_dec(&data->disabled);

	preempt_enable_notrace();
@@ -214,13 +217,13 @@
	struct trace_array *tr = wakeup_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
-	unsigned int trace_ctx;
+	int pc;

-	if (!func_prolog_preempt_disable(tr, &data, &trace_ctx))
+	if (!func_prolog_preempt_disable(tr, &data, &pc))
		return;

	local_irq_save(flags);
-	trace_function(tr, ip, parent_ip, trace_ctx);
+	trace_function(tr, ip, parent_ip, flags, pc);
	local_irq_restore(flags);

	atomic_dec(&data->disabled);
@@ -300,12 +303,12 @@
 static void
 __trace_function(struct trace_array *tr,
		  unsigned long ip, unsigned long parent_ip,
-		  unsigned int trace_ctx)
+		  unsigned long flags, int pc)
 {
	if (is_graph(tr))
-		trace_graph_function(tr, ip, parent_ip, trace_ctx);
+		trace_graph_function(tr, ip, parent_ip, flags, pc);
	else
-		trace_function(tr, ip, parent_ip, trace_ctx);
+		trace_function(tr, ip, parent_ip, flags, pc);
 }

 static int wakeup_flag_changed(struct trace_array *tr, u32 mask, int set)
@@ -372,7 +375,7 @@
 tracing_sched_switch_trace(struct trace_array *tr,
			    struct task_struct *prev,
			    struct task_struct *next,
-			    unsigned int trace_ctx)
+			    unsigned long flags, int pc)
 {
	struct trace_event_call *call = &event_context_switch;
	struct trace_buffer *buffer = tr->array_buffer.buffer;
@@ -380,7 +383,7 @@
	struct ctx_switch_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_CTX,
-					   sizeof(*entry), trace_ctx);
+					   sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
@@ -393,14 +396,14 @@
	entry->next_cpu = task_cpu(next);

	if (!call_filter_check_discard(call, entry, buffer, event))
-		trace_buffer_unlock_commit(tr, buffer, event, trace_ctx);
+		trace_buffer_unlock_commit(tr, buffer, event, flags, pc);
 }

 static void
 tracing_sched_wakeup_trace(struct trace_array *tr,
			    struct task_struct *wakee,
			    struct task_struct *curr,
-			    unsigned int trace_ctx)
+			    unsigned long flags, int pc)
 {
	struct trace_event_call *call = &event_wakeup;
	struct ring_buffer_event *event;
@@ -408,7 +411,7 @@
	struct trace_buffer *buffer = tr->array_buffer.buffer;

	event = trace_buffer_lock_reserve(buffer, TRACE_WAKE,
-					   sizeof(*entry), trace_ctx);
+					   sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
@@ -421,7 +424,7 @@
	entry->next_cpu = task_cpu(wakee);

	if (!call_filter_check_discard(call, entry, buffer, event))
-		trace_buffer_unlock_commit(tr, buffer, event, trace_ctx);
+		trace_buffer_unlock_commit(tr, buffer, event, flags, pc);
 }

 static void notrace
@@ -433,7 +436,7 @@
	unsigned long flags;
	long disabled;
	int cpu;
-	unsigned int trace_ctx;
+	int pc;

	tracing_record_cmdline(prev);

@@ -452,6 +455,8 @@
	if (next != wakeup_task)
		return;

+	pc = preempt_count();
+
	/* disable local data, not wakeup_cpu data */
	cpu = raw_smp_processor_id();
	disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled);
@@ -459,8 +464,6 @@
		goto out;

	local_irq_save(flags);
-	trace_ctx = tracing_gen_ctx_flags(flags);
-
	arch_spin_lock(&wakeup_lock);

	/* We could race with grabbing wakeup_lock */
@@ -470,9 +473,9 @@
	/* The task we are waiting for is waking up */
	data = per_cpu_ptr(wakeup_trace->array_buffer.data, wakeup_cpu);

-	__trace_function(wakeup_trace, CALLER_ADDR0, CALLER_ADDR1, trace_ctx);
-	tracing_sched_switch_trace(wakeup_trace, prev, next, trace_ctx);
-	__trace_stack(wakeup_trace, trace_ctx, 0);
+	__trace_function(wakeup_trace, CALLER_ADDR0, CALLER_ADDR1, flags, pc);
+	tracing_sched_switch_trace(wakeup_trace, prev, next, flags, pc);
+	__trace_stack(wakeup_trace, flags, 0, pc);

	T0 = data->preempt_timestamp;
	T1 = ftrace_now(cpu);
@@ -524,8 +527,9 @@
 {
	struct trace_array_cpu *data;
	int cpu = smp_processor_id();
+	unsigned long flags;
	long disabled;
-	unsigned int trace_ctx;
+	int pc;

	if (likely(!tracer_enabled))
		return;
@@ -546,11 +550,10 @@
	    (!dl_task(p) && (p->prio >= wakeup_prio || p->prio >= current->prio)))
		return;

+	pc = preempt_count();
	disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled);
	if (unlikely(disabled != 1))
		goto out;
-
-	trace_ctx = tracing_gen_ctx();

	/* interrupts should be off from try_to_wake_up */
	arch_spin_lock(&wakeup_lock);
@@ -578,17 +581,19 @@

	wakeup_task = get_task_struct(p);

+	local_save_flags(flags);
+
	data = per_cpu_ptr(wakeup_trace->array_buffer.data, wakeup_cpu);
	data->preempt_timestamp = ftrace_now(cpu);
-	tracing_sched_wakeup_trace(wakeup_trace, p, current, trace_ctx);
-	__trace_stack(wakeup_trace, trace_ctx, 0);
+	tracing_sched_wakeup_trace(wakeup_trace, p, current, flags, pc);
+	__trace_stack(wakeup_trace, flags, 0, pc);

	/*
	 * We must be careful in using CALLER_ADDR2. But since wake_up
	 * is not called by an assembly function (where as schedule is)
	 * it should be safe to use it here.
	 */
-	__trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, trace_ctx);
+	__trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc);

 out_locked:
	arch_spin_unlock(&wakeup_lock);
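
Note: the two call-site conventions this diff converts between differ only in how the tracing context is captured before emitting an event. The sketch below is illustrative only and is not part of the patch; it uses only functions already visible in the hunks above, and tr, ip and parent_ip stand in for the usual probe arguments.

	/* "-" side of this diff: a single packed context word, generated by
	 * tracing_gen_ctx() (or tracing_gen_ctx_flags(flags) when the flags
	 * were already saved by local_irq_save()) */
	unsigned int trace_ctx;

	trace_ctx = tracing_gen_ctx();
	trace_function(tr, ip, parent_ip, trace_ctx);

	/* "+" side of this diff: the preempt count and the interrupt flags
	 * are captured as two separate values and passed individually */
	unsigned long flags;
	int pc;

	pc = preempt_count();
	local_save_flags(flags);
	trace_function(tr, ip, parent_ip, flags, pc);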