2024-12-19 9370bb92b2d16684ee45cf24e879c93c509162da
kernel/kernel/trace/trace_sched_wakeup.c
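
Summary: this diff moves the wakeup tracer off the packed trace_ctx interface and back to passing the tracing context as two separate values. Every unsigned int trace_ctx parameter (filled by tracing_gen_ctx() or tracing_gen_ctx_flags()) becomes an unsigned long flags / int pc pair captured with local_save_flags() and preempt_count(), and wakeup_trace_open() gains an else branch that clears iter->private when the graph tracer is not in use.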
@@ -67,7 +67,7 @@
 static int
 func_prolog_preempt_disable(struct trace_array *tr,
 			    struct trace_array_cpu **data,
-			    unsigned int *trace_ctx)
+			    int *pc)
 {
 	long disabled;
 	int cpu;
@@ -75,7 +75,7 @@
 	if (likely(!wakeup_task))
 		return 0;
 
-	*trace_ctx = tracing_gen_ctx();
+	*pc = preempt_count();
 	preempt_disable_notrace();
 
 	cpu = raw_smp_processor_id();
@@ -116,8 +116,8 @@
 {
 	struct trace_array *tr = wakeup_trace;
 	struct trace_array_cpu *data;
-	unsigned int trace_ctx;
-	int ret = 0;
+	unsigned long flags;
+	int pc, ret = 0;
 
 	if (ftrace_graph_ignore_func(trace))
 		return 0;
@@ -131,10 +131,11 @@
 	if (ftrace_graph_notrace_addr(trace->func))
 		return 1;
 
-	if (!func_prolog_preempt_disable(tr, &data, &trace_ctx))
+	if (!func_prolog_preempt_disable(tr, &data, &pc))
 		return 0;
 
-	ret = __trace_graph_entry(tr, trace, trace_ctx);
+	local_save_flags(flags);
+	ret = __trace_graph_entry(tr, trace, flags, pc);
 	atomic_dec(&data->disabled);
 	preempt_enable_notrace();
 
@@ -145,14 +146,16 @@
 {
 	struct trace_array *tr = wakeup_trace;
 	struct trace_array_cpu *data;
-	unsigned int trace_ctx;
+	unsigned long flags;
+	int pc;
 
 	ftrace_graph_addr_finish(trace);
 
-	if (!func_prolog_preempt_disable(tr, &data, &trace_ctx))
+	if (!func_prolog_preempt_disable(tr, &data, &pc))
 		return;
 
-	__trace_graph_return(tr, trace, trace_ctx);
+	local_save_flags(flags);
+	__trace_graph_return(tr, trace, flags, pc);
 	atomic_dec(&data->disabled);
 
 	preempt_enable_notrace();
@@ -168,6 +171,8 @@
 {
 	if (is_graph(iter->tr))
 		graph_trace_open(iter);
+	else
+		iter->private = NULL;
 }
 
 static void wakeup_trace_close(struct trace_iterator *iter)
@@ -214,13 +219,13 @@
 	struct trace_array *tr = wakeup_trace;
 	struct trace_array_cpu *data;
 	unsigned long flags;
-	unsigned int trace_ctx;
+	int pc;
 
-	if (!func_prolog_preempt_disable(tr, &data, &trace_ctx))
+	if (!func_prolog_preempt_disable(tr, &data, &pc))
 		return;
 
 	local_irq_save(flags);
-	trace_function(tr, ip, parent_ip, trace_ctx);
+	trace_function(tr, ip, parent_ip, flags, pc);
 	local_irq_restore(flags);
 
 	atomic_dec(&data->disabled);
@@ -300,12 +305,12 @@
 static void
 __trace_function(struct trace_array *tr,
 		 unsigned long ip, unsigned long parent_ip,
-		 unsigned int trace_ctx)
+		 unsigned long flags, int pc)
 {
 	if (is_graph(tr))
-		trace_graph_function(tr, ip, parent_ip, trace_ctx);
+		trace_graph_function(tr, ip, parent_ip, flags, pc);
 	else
-		trace_function(tr, ip, parent_ip, trace_ctx);
+		trace_function(tr, ip, parent_ip, flags, pc);
 }
 
 static int wakeup_flag_changed(struct trace_array *tr, u32 mask, int set)
@@ -372,7 +377,7 @@
 tracing_sched_switch_trace(struct trace_array *tr,
 			   struct task_struct *prev,
 			   struct task_struct *next,
-			   unsigned int trace_ctx)
+			   unsigned long flags, int pc)
 {
 	struct trace_event_call *call = &event_context_switch;
 	struct trace_buffer *buffer = tr->array_buffer.buffer;
@@ -380,7 +385,7 @@
 	struct ctx_switch_entry *entry;
 
 	event = trace_buffer_lock_reserve(buffer, TRACE_CTX,
-					  sizeof(*entry), trace_ctx);
+					  sizeof(*entry), flags, pc);
 	if (!event)
 		return;
 	entry = ring_buffer_event_data(event);
@@ -393,14 +398,14 @@
 	entry->next_cpu = task_cpu(next);
 
 	if (!call_filter_check_discard(call, entry, buffer, event))
-		trace_buffer_unlock_commit(tr, buffer, event, trace_ctx);
+		trace_buffer_unlock_commit(tr, buffer, event, flags, pc);
 }
 
 static void
 tracing_sched_wakeup_trace(struct trace_array *tr,
 			   struct task_struct *wakee,
 			   struct task_struct *curr,
-			   unsigned int trace_ctx)
+			   unsigned long flags, int pc)
 {
 	struct trace_event_call *call = &event_wakeup;
 	struct ring_buffer_event *event;
@@ -408,7 +413,7 @@
 	struct trace_buffer *buffer = tr->array_buffer.buffer;
 
 	event = trace_buffer_lock_reserve(buffer, TRACE_WAKE,
-					  sizeof(*entry), trace_ctx);
+					  sizeof(*entry), flags, pc);
 	if (!event)
 		return;
 	entry = ring_buffer_event_data(event);
@@ -421,7 +426,7 @@
 	entry->next_cpu = task_cpu(wakee);
 
 	if (!call_filter_check_discard(call, entry, buffer, event))
-		trace_buffer_unlock_commit(tr, buffer, event, trace_ctx);
+		trace_buffer_unlock_commit(tr, buffer, event, flags, pc);
 }
 
 static void notrace
@@ -433,7 +438,7 @@
 	unsigned long flags;
 	long disabled;
 	int cpu;
-	unsigned int trace_ctx;
+	int pc;
 
 	tracing_record_cmdline(prev);
 
@@ -452,6 +457,8 @@
 	if (next != wakeup_task)
 		return;
 
+	pc = preempt_count();
+
 	/* disable local data, not wakeup_cpu data */
 	cpu = raw_smp_processor_id();
 	disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled);
@@ -459,8 +466,6 @@
 		goto out;
 
 	local_irq_save(flags);
-	trace_ctx = tracing_gen_ctx_flags(flags);
-
 	arch_spin_lock(&wakeup_lock);
 
 	/* We could race with grabbing wakeup_lock */
@@ -470,9 +475,9 @@
 	/* The task we are waiting for is waking up */
 	data = per_cpu_ptr(wakeup_trace->array_buffer.data, wakeup_cpu);
 
-	__trace_function(wakeup_trace, CALLER_ADDR0, CALLER_ADDR1, trace_ctx);
-	tracing_sched_switch_trace(wakeup_trace, prev, next, trace_ctx);
-	__trace_stack(wakeup_trace, trace_ctx, 0);
+	__trace_function(wakeup_trace, CALLER_ADDR0, CALLER_ADDR1, flags, pc);
+	tracing_sched_switch_trace(wakeup_trace, prev, next, flags, pc);
+	__trace_stack(wakeup_trace, flags, 0, pc);
 
 	T0 = data->preempt_timestamp;
 	T1 = ftrace_now(cpu);
@@ -524,8 +529,9 @@
 {
 	struct trace_array_cpu *data;
 	int cpu = smp_processor_id();
+	unsigned long flags;
 	long disabled;
-	unsigned int trace_ctx;
+	int pc;
 
 	if (likely(!tracer_enabled))
 		return;
@@ -546,11 +552,10 @@
 	    (!dl_task(p) && (p->prio >= wakeup_prio || p->prio >= current->prio)))
 		return;
 
+	pc = preempt_count();
 	disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled);
 	if (unlikely(disabled != 1))
 		goto out;
-
-	trace_ctx = tracing_gen_ctx();
 
 	/* interrupts should be off from try_to_wake_up */
 	arch_spin_lock(&wakeup_lock);
@@ -578,17 +583,19 @@
 
 	wakeup_task = get_task_struct(p);
 
+	local_save_flags(flags);
+
 	data = per_cpu_ptr(wakeup_trace->array_buffer.data, wakeup_cpu);
 	data->preempt_timestamp = ftrace_now(cpu);
-	tracing_sched_wakeup_trace(wakeup_trace, p, current, trace_ctx);
-	__trace_stack(wakeup_trace, trace_ctx, 0);
+	tracing_sched_wakeup_trace(wakeup_trace, p, current, flags, pc);
+	__trace_stack(wakeup_trace, flags, 0, pc);
 
 	/*
 	 * We must be careful in using CALLER_ADDR2. But since wake_up
 	 * is not called by an assembly function (where as schedule is)
 	 * it should be safe to use it here.
 	 */
-	__trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, trace_ctx);
+	__trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc);
 
 out_locked:
 	arch_spin_unlock(&wakeup_lock);
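
Note: the two calling conventions in this diff differ only in how the tracing context reaches the ring-buffer helpers: as two separate values (flags plus preempt count, the form restored here) or packed into a single unsigned int (the trace_ctx form being removed). The sketch below is a minimal, self-contained illustration of that trade-off; the demo_* names and the bit layout are hypothetical and do not reproduce the kernel's actual tracing_gen_ctx() implementation.

/*
 * Illustrative sketch only: contrasts passing the tracing context as
 * two arguments (flags + pc) with packing both into one word.
 * All demo_* identifiers and the bit layout are made up for this example.
 */
#include <stdio.h>

#define DEMO_PREEMPT_MASK 0x00ffffffu	/* assumed: low bits hold preempt count */
#define DEMO_IRQS_OFF	  0x01000000u	/* assumed: one flag bit for irq state */

/* Two-argument style, as restored by this diff. */
static void demo_event_two_args(unsigned long flags, int pc)
{
	printf("flags=%#lx pc=%d\n", flags, pc);
}

/* Packed style: derive one context word from both inputs. */
static unsigned int demo_gen_ctx(unsigned long flags, int pc)
{
	unsigned int ctx = (unsigned int)pc & DEMO_PREEMPT_MASK;

	if (flags & 1)	/* pretend bit 0 of flags means "irqs off" */
		ctx |= DEMO_IRQS_OFF;
	return ctx;
}

static void demo_event_one_arg(unsigned int trace_ctx)
{
	printf("pc=%u irqs_off=%d\n",
	       trace_ctx & DEMO_PREEMPT_MASK,
	       !!(trace_ctx & DEMO_IRQS_OFF));
}

int main(void)
{
	unsigned long flags = 1;	/* stand-in for local_save_flags() */
	int pc = 2;			/* stand-in for preempt_count() */

	demo_event_two_args(flags, pc);
	demo_event_one_arg(demo_gen_ctx(flags, pc));
	return 0;
}

The packed form shortens every event-helper signature by one argument; the two-argument form kept by this diff leaves flags and pc independently usable, as in wakeup_tracer_call() above, where flags still feeds local_irq_save()/local_irq_restore() directly.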