hc
2023-12-11 d2ccde1c8e90d38cee87a1b0309ad2827f3fd30d
kernel/kernel/trace/trace_sched_wakeup.c
@@ -35,25 +35,18 @@
 
 static void wakeup_reset(struct trace_array *tr);
 static void __wakeup_reset(struct trace_array *tr);
+static int start_func_tracer(struct trace_array *tr, int graph);
+static void stop_func_tracer(struct trace_array *tr, int graph);
 
 static int save_flags;
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-static int wakeup_display_graph(struct trace_array *tr, int set);
 # define is_graph(tr) ((tr)->trace_flags & TRACE_ITER_DISPLAY_GRAPH)
 #else
-static inline int wakeup_display_graph(struct trace_array *tr, int set)
-{
-	return 0;
-}
 # define is_graph(tr) false
 #endif
 
-
 #ifdef CONFIG_FUNCTION_TRACER
-
-static int wakeup_graph_entry(struct ftrace_graph_ent *trace);
-static void wakeup_graph_return(struct ftrace_graph_ret *trace);
 
 static bool function_enabled;
 
@@ -74,7 +67,7 @@
 static int
 func_prolog_preempt_disable(struct trace_array *tr,
 			    struct trace_array_cpu **data,
-			    int *pc)
+			    unsigned int *trace_ctx)
 {
 	long disabled;
 	int cpu;
@@ -82,14 +75,14 @@
 	if (likely(!wakeup_task))
 		return 0;
 
-	*pc = preempt_count();
+	*trace_ctx = tracing_gen_ctx();
 	preempt_disable_notrace();
 
 	cpu = raw_smp_processor_id();
 	if (cpu != wakeup_current_cpu)
 		goto out_enable;
 
-	*data = per_cpu_ptr(tr->trace_buffer.data, cpu);
+	*data = per_cpu_ptr(tr->array_buffer.data, cpu);
 	disabled = atomic_inc_return(&(*data)->disabled);
 	if (unlikely(disabled != 1))
 		goto out;
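
Note on the conversion in the hunks above: the separately captured preempt count ("pc") and irq flags are folded into a single packed context word, "trace_ctx", generated once and passed through the tracing helpers. A minimal sketch of the before/after calling pattern, using only helpers that appear in this diff (tracing_gen_ctx(), trace_function()); the internal layout of trace_ctx is treated as opaque here:

	/* before: preempt count and irq flags captured separately */
	unsigned long flags;
	int pc = preempt_count();
	local_save_flags(flags);
	trace_function(tr, ip, parent_ip, flags, pc);

	/* after: one context word, generated once and passed straight through */
	unsigned int trace_ctx = tracing_gen_ctx();
	trace_function(tr, ip, parent_ip, trace_ctx);
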
@@ -104,6 +97,113 @@
 	return 0;
 }
 
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+
+static int wakeup_display_graph(struct trace_array *tr, int set)
+{
+	if (!(is_graph(tr) ^ set))
+		return 0;
+
+	stop_func_tracer(tr, !set);
+
+	wakeup_reset(wakeup_trace);
+	tr->max_latency = 0;
+
+	return start_func_tracer(tr, set);
+}
+
+static int wakeup_graph_entry(struct ftrace_graph_ent *trace)
+{
+	struct trace_array *tr = wakeup_trace;
+	struct trace_array_cpu *data;
+	unsigned int trace_ctx;
+	int ret = 0;
+
+	if (ftrace_graph_ignore_func(trace))
+		return 0;
+	/*
+	 * Do not trace a function if it's filtered by set_graph_notrace.
+	 * Make the index of ret stack negative to indicate that it should
+	 * ignore further functions. But it needs its own ret stack entry
+	 * to recover the original index in order to continue tracing after
+	 * returning from the function.
+	 */
+	if (ftrace_graph_notrace_addr(trace->func))
+		return 1;
+
+	if (!func_prolog_preempt_disable(tr, &data, &trace_ctx))
+		return 0;
+
+	ret = __trace_graph_entry(tr, trace, trace_ctx);
+	atomic_dec(&data->disabled);
+	preempt_enable_notrace();
+
+	return ret;
+}
+
+static void wakeup_graph_return(struct ftrace_graph_ret *trace)
+{
+	struct trace_array *tr = wakeup_trace;
+	struct trace_array_cpu *data;
+	unsigned int trace_ctx;
+
+	ftrace_graph_addr_finish(trace);
+
+	if (!func_prolog_preempt_disable(tr, &data, &trace_ctx))
+		return;
+
+	__trace_graph_return(tr, trace, trace_ctx);
+	atomic_dec(&data->disabled);
+
+	preempt_enable_notrace();
+	return;
+}
+
+static struct fgraph_ops fgraph_wakeup_ops = {
+	.entryfunc = &wakeup_graph_entry,
+	.retfunc = &wakeup_graph_return,
+};
+
+static void wakeup_trace_open(struct trace_iterator *iter)
+{
+	if (is_graph(iter->tr))
+		graph_trace_open(iter);
+}
+
+static void wakeup_trace_close(struct trace_iterator *iter)
+{
+	if (iter->private)
+		graph_trace_close(iter);
+}
+
+#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_PROC | \
+			    TRACE_GRAPH_PRINT_CPU | \
+			    TRACE_GRAPH_PRINT_REL_TIME | \
+			    TRACE_GRAPH_PRINT_DURATION | \
+			    TRACE_GRAPH_PRINT_OVERHEAD | \
+			    TRACE_GRAPH_PRINT_IRQS)
+
+static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
+{
+	/*
+	 * In graph mode call the graph tracer output function,
+	 * otherwise go with the TRACE_FN event handler
+	 */
+	if (is_graph(iter->tr))
+		return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);
+
+	return TRACE_TYPE_UNHANDLED;
+}
+
+static void wakeup_print_header(struct seq_file *s)
+{
+	if (is_graph(wakeup_trace))
+		print_graph_headers_flags(s, GRAPH_TRACER_FLAGS);
+	else
+		trace_default_header(s);
+}
+#endif /* else CONFIG_FUNCTION_GRAPH_TRACER */
+
 /*
  * wakeup uses its own tracer function to keep the overhead down:
  */
@@ -114,13 +214,13 @@
 	struct trace_array *tr = wakeup_trace;
 	struct trace_array_cpu *data;
 	unsigned long flags;
-	int pc;
+	unsigned int trace_ctx;
 
-	if (!func_prolog_preempt_disable(tr, &data, &pc))
+	if (!func_prolog_preempt_disable(tr, &data, &trace_ctx))
 		return;
 
 	local_irq_save(flags);
-	trace_function(tr, ip, parent_ip, flags, pc);
+	trace_function(tr, ip, parent_ip, trace_ctx);
 	local_irq_restore(flags);
 
 	atomic_dec(&data->disabled);
@@ -136,8 +236,7 @@
 		return 0;
 
 	if (graph)
-		ret = register_ftrace_graph(&wakeup_graph_return,
-					    &wakeup_graph_entry);
+		ret = register_ftrace_graph(&fgraph_wakeup_ops);
 	else
 		ret = register_ftrace_function(tr->ops);
 
@@ -153,7 +252,7 @@
 		return;
 
 	if (graph)
-		unregister_ftrace_graph();
+		unregister_ftrace_graph(&fgraph_wakeup_ops);
 	else
 		unregister_ftrace_function(tr->ops);
 
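
The two hunks above move the graph-tracer hookup from a pair of bare entry/return callbacks to a single fgraph_ops descriptor, so registration and unregistration now name the same object. The pairing as used by this patch (fgraph_wakeup_ops and both callbacks are defined earlier in the diff; nothing new is introduced here):

	static struct fgraph_ops fgraph_wakeup_ops = {
		.entryfunc = &wakeup_graph_entry,
		.retfunc = &wakeup_graph_return,
	};

	/* previously: register_ftrace_graph(&wakeup_graph_return, &wakeup_graph_entry); */
	ret = register_ftrace_graph(&fgraph_wakeup_ops);
	/* teardown names the same descriptor */
	unregister_ftrace_graph(&fgraph_wakeup_ops);
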
@@ -171,7 +270,7 @@
 	unregister_wakeup_function(tr, is_graph(tr));
 	return 1;
 }
-#else
+#else /* CONFIG_FUNCTION_TRACER */
 static int register_wakeup_function(struct trace_array *tr, int graph, int set)
 {
 	return 0;
@@ -181,7 +280,33 @@
 {
 	return 0;
 }
-#endif /* CONFIG_FUNCTION_TRACER */
+#endif /* else CONFIG_FUNCTION_TRACER */
+
+#ifndef CONFIG_FUNCTION_GRAPH_TRACER
+static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
+{
+	return TRACE_TYPE_UNHANDLED;
+}
+
+static void wakeup_trace_open(struct trace_iterator *iter) { }
+static void wakeup_trace_close(struct trace_iterator *iter) { }
+
+static void wakeup_print_header(struct seq_file *s)
+{
+	trace_default_header(s);
+}
+#endif /* !CONFIG_FUNCTION_GRAPH_TRACER */
+
+static void
+__trace_function(struct trace_array *tr,
+		 unsigned long ip, unsigned long parent_ip,
+		 unsigned int trace_ctx)
+{
+	if (is_graph(tr))
+		trace_graph_function(tr, ip, parent_ip, trace_ctx);
+	else
+		trace_function(tr, ip, parent_ip, trace_ctx);
+}
 
 static int wakeup_flag_changed(struct trace_array *tr, u32 mask, int set)
 {
@@ -219,145 +344,6 @@
 	unregister_wakeup_function(tr, graph);
 }
 
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-static int wakeup_display_graph(struct trace_array *tr, int set)
-{
-	if (!(is_graph(tr) ^ set))
-		return 0;
-
-	stop_func_tracer(tr, !set);
-
-	wakeup_reset(wakeup_trace);
-	tr->max_latency = 0;
-
-	return start_func_tracer(tr, set);
-}
-
-static int wakeup_graph_entry(struct ftrace_graph_ent *trace)
-{
-	struct trace_array *tr = wakeup_trace;
-	struct trace_array_cpu *data;
-	unsigned long flags;
-	int pc, ret = 0;
-
-	if (ftrace_graph_ignore_func(trace))
-		return 0;
-	/*
-	 * Do not trace a function if it's filtered by set_graph_notrace.
-	 * Make the index of ret stack negative to indicate that it should
-	 * ignore further functions. But it needs its own ret stack entry
-	 * to recover the original index in order to continue tracing after
-	 * returning from the function.
-	 */
-	if (ftrace_graph_notrace_addr(trace->func))
-		return 1;
-
-	if (!func_prolog_preempt_disable(tr, &data, &pc))
-		return 0;
-
-	local_save_flags(flags);
-	ret = __trace_graph_entry(tr, trace, flags, pc);
-	atomic_dec(&data->disabled);
-	preempt_enable_notrace();
-
-	return ret;
-}
-
-static void wakeup_graph_return(struct ftrace_graph_ret *trace)
-{
-	struct trace_array *tr = wakeup_trace;
-	struct trace_array_cpu *data;
-	unsigned long flags;
-	int pc;
-
-	ftrace_graph_addr_finish(trace);
-
-	if (!func_prolog_preempt_disable(tr, &data, &pc))
-		return;
-
-	local_save_flags(flags);
-	__trace_graph_return(tr, trace, flags, pc);
-	atomic_dec(&data->disabled);
-
-	preempt_enable_notrace();
-	return;
-}
-
-static void wakeup_trace_open(struct trace_iterator *iter)
-{
-	if (is_graph(iter->tr))
-		graph_trace_open(iter);
-}
-
-static void wakeup_trace_close(struct trace_iterator *iter)
-{
-	if (iter->private)
-		graph_trace_close(iter);
-}
-
-#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_PROC | \
-			    TRACE_GRAPH_PRINT_ABS_TIME | \
-			    TRACE_GRAPH_PRINT_DURATION)
-
-static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
-{
-	/*
-	 * In graph mode call the graph tracer output function,
-	 * otherwise go with the TRACE_FN event handler
-	 */
-	if (is_graph(iter->tr))
-		return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);
-
-	return TRACE_TYPE_UNHANDLED;
-}
-
-static void wakeup_print_header(struct seq_file *s)
-{
-	if (is_graph(wakeup_trace))
-		print_graph_headers_flags(s, GRAPH_TRACER_FLAGS);
-	else
-		trace_default_header(s);
-}
-
-static void
-__trace_function(struct trace_array *tr,
-		 unsigned long ip, unsigned long parent_ip,
-		 unsigned long flags, int pc)
-{
-	if (is_graph(tr))
-		trace_graph_function(tr, ip, parent_ip, flags, pc);
-	else
-		trace_function(tr, ip, parent_ip, flags, pc);
-}
-#else
-#define __trace_function trace_function
-
-static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
-{
-	return TRACE_TYPE_UNHANDLED;
-}
-
-static void wakeup_trace_open(struct trace_iterator *iter) { }
-static void wakeup_trace_close(struct trace_iterator *iter) { }
-
-#ifdef CONFIG_FUNCTION_TRACER
-static int wakeup_graph_entry(struct ftrace_graph_ent *trace)
-{
-	return -1;
-}
-static void wakeup_graph_return(struct ftrace_graph_ret *trace) { }
-static void wakeup_print_header(struct seq_file *s)
-{
-	trace_default_header(s);
-}
-#else
-static void wakeup_print_header(struct seq_file *s)
-{
-	trace_latency_header(s);
-}
-#endif /* CONFIG_FUNCTION_TRACER */
-#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
-
 /*
  * Should this new latency be reported/recorded?
  */
@@ -386,15 +372,15 @@
 tracing_sched_switch_trace(struct trace_array *tr,
 			   struct task_struct *prev,
 			   struct task_struct *next,
-			   unsigned long flags, int pc)
+			   unsigned int trace_ctx)
 {
 	struct trace_event_call *call = &event_context_switch;
-	struct ring_buffer *buffer = tr->trace_buffer.buffer;
+	struct trace_buffer *buffer = tr->array_buffer.buffer;
 	struct ring_buffer_event *event;
 	struct ctx_switch_entry *entry;
 
 	event = trace_buffer_lock_reserve(buffer, TRACE_CTX,
-					  sizeof(*entry), flags, pc);
+					  sizeof(*entry), trace_ctx);
 	if (!event)
 		return;
 	entry = ring_buffer_event_data(event);
@@ -407,22 +393,22 @@
 	entry->next_cpu	= task_cpu(next);
 
 	if (!call_filter_check_discard(call, entry, buffer, event))
-		trace_buffer_unlock_commit(tr, buffer, event, flags, pc);
+		trace_buffer_unlock_commit(tr, buffer, event, trace_ctx);
 }
 
 static void
 tracing_sched_wakeup_trace(struct trace_array *tr,
 			   struct task_struct *wakee,
 			   struct task_struct *curr,
-			   unsigned long flags, int pc)
+			   unsigned int trace_ctx)
 {
 	struct trace_event_call *call = &event_wakeup;
 	struct ring_buffer_event *event;
 	struct ctx_switch_entry *entry;
-	struct ring_buffer *buffer = tr->trace_buffer.buffer;
+	struct trace_buffer *buffer = tr->array_buffer.buffer;
 
 	event = trace_buffer_lock_reserve(buffer, TRACE_WAKE,
-					  sizeof(*entry), flags, pc);
+					  sizeof(*entry), trace_ctx);
 	if (!event)
 		return;
 	entry = ring_buffer_event_data(event);
@@ -435,7 +421,7 @@
 	entry->next_cpu	= task_cpu(wakee);
 
 	if (!call_filter_check_discard(call, entry, buffer, event))
-		trace_buffer_unlock_commit(tr, buffer, event, flags, pc);
+		trace_buffer_unlock_commit(tr, buffer, event, trace_ctx);
 }
 
 static void notrace
@@ -447,7 +433,7 @@
 	unsigned long flags;
 	long disabled;
 	int cpu;
-	int pc;
+	unsigned int trace_ctx;
 
 	tracing_record_cmdline(prev);
 
@@ -466,15 +452,15 @@
 	if (next != wakeup_task)
 		return;
 
-	pc = preempt_count();
-
 	/* disable local data, not wakeup_cpu data */
 	cpu = raw_smp_processor_id();
-	disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
+	disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled);
	if (likely(disabled != 1))
 		goto out;
 
 	local_irq_save(flags);
+	trace_ctx = tracing_gen_ctx_flags(flags);
+
 	arch_spin_lock(&wakeup_lock);
 
 	/* We could race with grabbing wakeup_lock */
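
In the sched-switch probe above, the context word is built from the flags just saved by local_irq_save() rather than sampled afresh, presumably so the recorded irq state is the one in effect before the probe itself disabled interrupts; tracing_gen_ctx_flags() is the variant this patch uses for that case. The relevant sequence, as it reads after the change:

	local_irq_save(flags);
	trace_ctx = tracing_gen_ctx_flags(flags);	/* derive context from the saved flags */

	arch_spin_lock(&wakeup_lock);
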
@@ -482,10 +468,11 @@
 		goto out_unlock;
 
 	/* The task we are waiting for is waking up */
-	data = per_cpu_ptr(wakeup_trace->trace_buffer.data, wakeup_cpu);
+	data = per_cpu_ptr(wakeup_trace->array_buffer.data, wakeup_cpu);
 
-	__trace_function(wakeup_trace, CALLER_ADDR0, CALLER_ADDR1, flags, pc);
-	tracing_sched_switch_trace(wakeup_trace, prev, next, flags, pc);
+	__trace_function(wakeup_trace, CALLER_ADDR0, CALLER_ADDR1, trace_ctx);
+	tracing_sched_switch_trace(wakeup_trace, prev, next, trace_ctx);
+	__trace_stack(wakeup_trace, trace_ctx, 0);
 
 	T0 = data->preempt_timestamp;
 	T1 = ftrace_now(cpu);
@@ -496,7 +483,7 @@
 
 	if (likely(!is_tracing_stopped())) {
 		wakeup_trace->max_latency = delta;
-		update_max_tr(wakeup_trace, wakeup_task, wakeup_cpu);
+		update_max_tr(wakeup_trace, wakeup_task, wakeup_cpu, NULL);
 	}
 
 out_unlock:
@@ -504,7 +491,7 @@
 	arch_spin_unlock(&wakeup_lock);
 	local_irq_restore(flags);
 out:
-	atomic_dec(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
+	atomic_dec(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled);
 }
 
 static void __wakeup_reset(struct trace_array *tr)
@@ -523,7 +510,7 @@
 {
 	unsigned long flags;
 
-	tracing_reset_online_cpus(&tr->trace_buffer);
+	tracing_reset_online_cpus(&tr->array_buffer);
 
 	local_irq_save(flags);
 	arch_spin_lock(&wakeup_lock);
@@ -537,9 +524,8 @@
 {
 	struct trace_array_cpu *data;
 	int cpu = smp_processor_id();
-	unsigned long flags;
 	long disabled;
-	int pc;
+	unsigned int trace_ctx;
 
 	if (likely(!tracer_enabled))
 		return;
@@ -560,10 +546,11 @@
 	    (!dl_task(p) && (p->prio >= wakeup_prio || p->prio >= current->prio)))
 		return;
 
-	pc = preempt_count();
-	disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
+	disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled);
 	if (unlikely(disabled != 1))
 		goto out;
+
+	trace_ctx = tracing_gen_ctx();
 
 	/* interrupts should be off from try_to_wake_up */
 	arch_spin_lock(&wakeup_lock);
@@ -589,26 +576,24 @@
 	else
 		tracing_dl = 0;
 
-	wakeup_task = p;
-	get_task_struct(wakeup_task);
+	wakeup_task = get_task_struct(p);
 
-	local_save_flags(flags);
-
-	data = per_cpu_ptr(wakeup_trace->trace_buffer.data, wakeup_cpu);
+	data = per_cpu_ptr(wakeup_trace->array_buffer.data, wakeup_cpu);
 	data->preempt_timestamp = ftrace_now(cpu);
-	tracing_sched_wakeup_trace(wakeup_trace, p, current, flags, pc);
+	tracing_sched_wakeup_trace(wakeup_trace, p, current, trace_ctx);
+	__trace_stack(wakeup_trace, trace_ctx, 0);
 
 	/*
 	 * We must be careful in using CALLER_ADDR2. But since wake_up
 	 * is not called by an assembly function (where as schedule is)
 	 * it should be safe to use it here.
 	 */
-	__trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc);
+	__trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, trace_ctx);
 
 out_locked:
 	arch_spin_unlock(&wakeup_lock);
 out:
-	atomic_dec(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
+	atomic_dec(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled);
 }
 
 static void start_wakeup_tracer(struct trace_array *tr)