2024-05-10 61598093bbdd283a7edc367d900f223070ead8d2
kernel/kernel/trace/trace_sched_wakeup.c
@@ -35,25 +35,18 @@
 
 static void wakeup_reset(struct trace_array *tr);
 static void __wakeup_reset(struct trace_array *tr);
+static int start_func_tracer(struct trace_array *tr, int graph);
+static void stop_func_tracer(struct trace_array *tr, int graph);
 
 static int save_flags;
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-static int wakeup_display_graph(struct trace_array *tr, int set);
 # define is_graph(tr) ((tr)->trace_flags & TRACE_ITER_DISPLAY_GRAPH)
 #else
-static inline int wakeup_display_graph(struct trace_array *tr, int set)
-{
-	return 0;
-}
 # define is_graph(tr) false
 #endif
 
-
 #ifdef CONFIG_FUNCTION_TRACER
-
-static int wakeup_graph_entry(struct ftrace_graph_ent *trace);
-static void wakeup_graph_return(struct ftrace_graph_ret *trace);
 
 static bool function_enabled;
 
@@ -89,7 +82,7 @@
 	if (cpu != wakeup_current_cpu)
 		goto out_enable;
 
-	*data = per_cpu_ptr(tr->trace_buffer.data, cpu);
+	*data = per_cpu_ptr(tr->array_buffer.data, cpu);
 	disabled = atomic_inc_return(&(*data)->disabled);
 	if (unlikely(disabled != 1))
 		goto out;
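
Note: throughout this patch the trace_buffer member of struct trace_array is renamed to array_buffer; only the field name changes, not the access pattern. As a minimal sketch of the per-CPU disable protocol this helper implements (identifiers taken from this file, error handling trimmed):

	struct trace_array_cpu *data;
	int disabled;

	/* Fetch this tracer's per-CPU bookkeeping from the renamed member. */
	data = per_cpu_ptr(tr->array_buffer.data, cpu);

	/* Only the first entrant (0 -> 1 transition) may record events. */
	disabled = atomic_inc_return(&data->disabled);
	if (unlikely(disabled != 1))
		goto out;
	/* ... record into tr->array_buffer.buffer ... */
out:
	atomic_dec(&data->disabled);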
@@ -104,122 +97,8 @@
 	return 0;
 }
 
-/*
- * wakeup uses its own tracer function to keep the overhead down:
- */
-static void
-wakeup_tracer_call(unsigned long ip, unsigned long parent_ip,
-		   struct ftrace_ops *op, struct pt_regs *pt_regs)
-{
-	struct trace_array *tr = wakeup_trace;
-	struct trace_array_cpu *data;
-	unsigned long flags;
-	int pc;
-
-	if (!func_prolog_preempt_disable(tr, &data, &pc))
-		return;
-
-	local_irq_save(flags);
-	trace_function(tr, ip, parent_ip, flags, pc);
-	local_irq_restore(flags);
-
-	atomic_dec(&data->disabled);
-	preempt_enable_notrace();
-}
-
-static int register_wakeup_function(struct trace_array *tr, int graph, int set)
-{
-	int ret;
-
-	/* 'set' is set if TRACE_ITER_FUNCTION is about to be set */
-	if (function_enabled || (!set && !(tr->trace_flags & TRACE_ITER_FUNCTION)))
-		return 0;
-
-	if (graph)
-		ret = register_ftrace_graph(&wakeup_graph_return,
-					    &wakeup_graph_entry);
-	else
-		ret = register_ftrace_function(tr->ops);
-
-	if (!ret)
-		function_enabled = true;
-
-	return ret;
-}
-
-static void unregister_wakeup_function(struct trace_array *tr, int graph)
-{
-	if (!function_enabled)
-		return;
-
-	if (graph)
-		unregister_ftrace_graph();
-	else
-		unregister_ftrace_function(tr->ops);
-
-	function_enabled = false;
-}
-
-static int wakeup_function_set(struct trace_array *tr, u32 mask, int set)
-{
-	if (!(mask & TRACE_ITER_FUNCTION))
-		return 0;
-
-	if (set)
-		register_wakeup_function(tr, is_graph(tr), 1);
-	else
-		unregister_wakeup_function(tr, is_graph(tr));
-	return 1;
-}
-#else
-static int register_wakeup_function(struct trace_array *tr, int graph, int set)
-{
-	return 0;
-}
-static void unregister_wakeup_function(struct trace_array *tr, int graph) { }
-static int wakeup_function_set(struct trace_array *tr, u32 mask, int set)
-{
-	return 0;
-}
-#endif /* CONFIG_FUNCTION_TRACER */
-
-static int wakeup_flag_changed(struct trace_array *tr, u32 mask, int set)
-{
-	struct tracer *tracer = tr->current_trace;
-
-	if (wakeup_function_set(tr, mask, set))
-		return 0;
-
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-	if (mask & TRACE_ITER_DISPLAY_GRAPH)
-		return wakeup_display_graph(tr, set);
-#endif
 
-	return trace_keep_overwrite(tracer, mask, set);
-}
-
-static int start_func_tracer(struct trace_array *tr, int graph)
-{
-	int ret;
-
-	ret = register_wakeup_function(tr, graph, 0);
-
-	if (!ret && tracing_is_enabled())
-		tracer_enabled = 1;
-	else
-		tracer_enabled = 0;
-
-	return ret;
-}
-
-static void stop_func_tracer(struct trace_array *tr, int graph)
-{
-	tracer_enabled = 0;
-
-	unregister_wakeup_function(tr, graph);
-}
-
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 static int wakeup_display_graph(struct trace_array *tr, int set)
 {
 	if (!(is_graph(tr) ^ set))
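
Note: the large removal above is a relocation, not a deletion. wakeup_tracer_call(), register_wakeup_function(), unregister_wakeup_function(), wakeup_function_set(), wakeup_flag_changed(), start_func_tracer() and stop_func_tracer() reappear below (see the @@ -318,6 +207,100 @@ and @@ -329,34 +312,42 @@ hunks), after the graph-tracer code, so that register_wakeup_function() can reference the new fgraph_wakeup_ops structure. The forward declarations added at the top of the file keep wakeup_display_graph(), which calls start_func_tracer()/stop_func_tracer() before their new definitions, compiling.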
@@ -283,10 +162,17 @@
 		return;
 	}
 
+static struct fgraph_ops fgraph_wakeup_ops = {
+	.entryfunc = &wakeup_graph_entry,
+	.retfunc = &wakeup_graph_return,
+};
+
 static void wakeup_trace_open(struct trace_iterator *iter)
 {
 	if (is_graph(iter->tr))
 		graph_trace_open(iter);
+	else
+		iter->private = NULL;
 }
 
 static void wakeup_trace_close(struct trace_iterator *iter)
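
Note: the two bare callbacks formerly passed to register_ftrace_graph() are now bundled into a struct fgraph_ops. A self-contained sketch of the new registration idiom, using hypothetical my_* names (the callback signatures mirror wakeup_graph_entry/wakeup_graph_return in this file):

	/* Entry handler: return nonzero to trace this function and its children. */
	static int my_graph_entry(struct ftrace_graph_ent *trace)
	{
		return 1;
	}

	/* Return handler: invoked as the traced function returns. */
	static void my_graph_return(struct ftrace_graph_ret *trace)
	{
	}

	static struct fgraph_ops my_fgraph_ops = {
		.entryfunc	= &my_graph_entry,
		.retfunc	= &my_graph_return,
	};

	/* Both registration and unregistration now take the ops struct:
	 *	register_ftrace_graph(&my_fgraph_ops);
	 *	...
	 *	unregister_ftrace_graph(&my_fgraph_ops);
	 */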
@@ -296,8 +182,11 @@
 }
 
 #define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_PROC | \
-			    TRACE_GRAPH_PRINT_ABS_TIME | \
-			    TRACE_GRAPH_PRINT_DURATION)
+			    TRACE_GRAPH_PRINT_CPU | \
+			    TRACE_GRAPH_PRINT_REL_TIME | \
+			    TRACE_GRAPH_PRINT_DURATION | \
+			    TRACE_GRAPH_PRINT_OVERHEAD | \
+			    TRACE_GRAPH_PRINT_IRQS)
 
 static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
 {
@@ -318,6 +207,100 @@
 	else
 		trace_default_header(s);
 }
+#endif /* else CONFIG_FUNCTION_GRAPH_TRACER */
+
+/*
+ * wakeup uses its own tracer function to keep the overhead down:
+ */
+static void
+wakeup_tracer_call(unsigned long ip, unsigned long parent_ip,
+		   struct ftrace_ops *op, struct pt_regs *pt_regs)
+{
+	struct trace_array *tr = wakeup_trace;
+	struct trace_array_cpu *data;
+	unsigned long flags;
+	int pc;
+
+	if (!func_prolog_preempt_disable(tr, &data, &pc))
+		return;
+
+	local_irq_save(flags);
+	trace_function(tr, ip, parent_ip, flags, pc);
+	local_irq_restore(flags);
+
+	atomic_dec(&data->disabled);
+	preempt_enable_notrace();
+}
+
+static int register_wakeup_function(struct trace_array *tr, int graph, int set)
+{
+	int ret;
+
+	/* 'set' is set if TRACE_ITER_FUNCTION is about to be set */
+	if (function_enabled || (!set && !(tr->trace_flags & TRACE_ITER_FUNCTION)))
+		return 0;
+
+	if (graph)
+		ret = register_ftrace_graph(&fgraph_wakeup_ops);
+	else
+		ret = register_ftrace_function(tr->ops);
+
+	if (!ret)
+		function_enabled = true;
+
+	return ret;
+}
+
+static void unregister_wakeup_function(struct trace_array *tr, int graph)
+{
+	if (!function_enabled)
+		return;
+
+	if (graph)
+		unregister_ftrace_graph(&fgraph_wakeup_ops);
+	else
+		unregister_ftrace_function(tr->ops);
+
+	function_enabled = false;
+}
+
+static int wakeup_function_set(struct trace_array *tr, u32 mask, int set)
+{
+	if (!(mask & TRACE_ITER_FUNCTION))
+		return 0;
+
+	if (set)
+		register_wakeup_function(tr, is_graph(tr), 1);
+	else
+		unregister_wakeup_function(tr, is_graph(tr));
+	return 1;
+}
+#else /* CONFIG_FUNCTION_TRACER */
+static int register_wakeup_function(struct trace_array *tr, int graph, int set)
+{
+	return 0;
+}
+static void unregister_wakeup_function(struct trace_array *tr, int graph) { }
+static int wakeup_function_set(struct trace_array *tr, u32 mask, int set)
+{
+	return 0;
+}
+#endif /* else CONFIG_FUNCTION_TRACER */
+
+#ifndef CONFIG_FUNCTION_GRAPH_TRACER
+static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
+{
+	return TRACE_TYPE_UNHANDLED;
+}
+
+static void wakeup_trace_open(struct trace_iterator *iter) { }
+static void wakeup_trace_close(struct trace_iterator *iter) { }
+
+static void wakeup_print_header(struct seq_file *s)
+{
+	trace_default_header(s);
+}
+#endif /* !CONFIG_FUNCTION_GRAPH_TRACER */
 
 static void
 __trace_function(struct trace_array *tr,
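
Note: the relocated block also replaces the old #else branch with explicit #ifndef CONFIG_FUNCTION_GRAPH_TRACER stubs, so callers can invoke wakeup_print_line(), wakeup_trace_open() and friends unconditionally. A minimal sketch of that config-gated stub pattern, with hypothetical names:

	#include <linux/seq_file.h>

	#ifdef CONFIG_FOO_TRACER
	static void foo_print_header(struct seq_file *s)
	{
		/* real implementation */
	}
	#else
	/* Empty stub: callers need no #ifdef of their own. */
	static void foo_print_header(struct seq_file *s) { }
	#endif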
@@ -329,34 +312,42 @@
 	else
 		trace_function(tr, ip, parent_ip, flags, pc);
 }
-#else
-#define __trace_function trace_function
 
-static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
+static int wakeup_flag_changed(struct trace_array *tr, u32 mask, int set)
 {
-	return TRACE_TYPE_UNHANDLED;
+	struct tracer *tracer = tr->current_trace;
+
+	if (wakeup_function_set(tr, mask, set))
+		return 0;
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	if (mask & TRACE_ITER_DISPLAY_GRAPH)
+		return wakeup_display_graph(tr, set);
+#endif
+
+	return trace_keep_overwrite(tracer, mask, set);
 }
 
-static void wakeup_trace_open(struct trace_iterator *iter) { }
-static void wakeup_trace_close(struct trace_iterator *iter) { }
+static int start_func_tracer(struct trace_array *tr, int graph)
+{
+	int ret;
 
-#ifdef CONFIG_FUNCTION_TRACER
-static int wakeup_graph_entry(struct ftrace_graph_ent *trace)
-{
-	return -1;
+	ret = register_wakeup_function(tr, graph, 0);
+
+	if (!ret && tracing_is_enabled())
+		tracer_enabled = 1;
+	else
+		tracer_enabled = 0;
+
+	return ret;
 }
-static void wakeup_graph_return(struct ftrace_graph_ret *trace) { }
-static void wakeup_print_header(struct seq_file *s)
+
+static void stop_func_tracer(struct trace_array *tr, int graph)
 {
-	trace_default_header(s);
+	tracer_enabled = 0;
+
+	unregister_wakeup_function(tr, graph);
 }
-#else
-static void wakeup_print_header(struct seq_file *s)
-{
-	trace_latency_header(s);
-}
-#endif /* CONFIG_FUNCTION_TRACER */
-#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 
 /*
  * Should this new latency be reported/recorded?
@@ -389,7 +380,7 @@
 			   unsigned long flags, int pc)
 {
 	struct trace_event_call *call = &event_context_switch;
-	struct ring_buffer *buffer = tr->trace_buffer.buffer;
+	struct trace_buffer *buffer = tr->array_buffer.buffer;
 	struct ring_buffer_event *event;
 	struct ctx_switch_entry *entry;
 
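
Note: two coordinated renames meet in this hunk: the ring-buffer type struct ring_buffer became struct trace_buffer (freeing that name), and struct trace_array's trace_buffer member became array_buffer. The net effect on a declaration:

	/* before */
	struct ring_buffer *buffer = tr->trace_buffer.buffer;

	/* after */
	struct trace_buffer *buffer = tr->array_buffer.buffer;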
@@ -419,7 +410,7 @@
 	struct trace_event_call *call = &event_wakeup;
 	struct ring_buffer_event *event;
 	struct ctx_switch_entry *entry;
-	struct ring_buffer *buffer = tr->trace_buffer.buffer;
+	struct trace_buffer *buffer = tr->array_buffer.buffer;
 
 	event = trace_buffer_lock_reserve(buffer, TRACE_WAKE,
 					  sizeof(*entry), flags, pc);
@@ -470,7 +461,7 @@
 
 	/* disable local data, not wakeup_cpu data */
 	cpu = raw_smp_processor_id();
-	disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
+	disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled);
 	if (likely(disabled != 1))
 		goto out;
@@ -482,10 +473,11 @@
 		goto out_unlock;
 
 	/* The task we are waiting for is waking up */
-	data = per_cpu_ptr(wakeup_trace->trace_buffer.data, wakeup_cpu);
+	data = per_cpu_ptr(wakeup_trace->array_buffer.data, wakeup_cpu);
 
 	__trace_function(wakeup_trace, CALLER_ADDR0, CALLER_ADDR1, flags, pc);
 	tracing_sched_switch_trace(wakeup_trace, prev, next, flags, pc);
+	__trace_stack(wakeup_trace, flags, 0, pc);
 
 	T0 = data->preempt_timestamp;
 	T1 = ftrace_now(cpu);
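
Note: the added __trace_stack() call records a kernel stack trace alongside the context-switch event, which helps attribute the measured wakeup latency. Assuming the signature used by the core tracing code, the third argument is the number of stack frames to skip; 0 keeps the full trace from the caller:

	/* trace array, irq flags, frames to skip, preempt count */
	__trace_stack(wakeup_trace, flags, 0, pc);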
@@ -496,7 +488,7 @@
 
 	if (likely(!is_tracing_stopped())) {
 		wakeup_trace->max_latency = delta;
-		update_max_tr(wakeup_trace, wakeup_task, wakeup_cpu);
+		update_max_tr(wakeup_trace, wakeup_task, wakeup_cpu, NULL);
 	}
 
 out_unlock:
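
Note: update_max_tr() grew a fourth parameter; in the corresponding mainline change it carries optional conditional-snapshot data. A tracer with no snapshot condition, as here, passes NULL:

	if (likely(!is_tracing_stopped())) {
		wakeup_trace->max_latency = delta;
		/* NULL: no conditional-snapshot payload for this tracer */
		update_max_tr(wakeup_trace, wakeup_task, wakeup_cpu, NULL);
	}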
@@ -504,7 +496,7 @@
 	arch_spin_unlock(&wakeup_lock);
 	local_irq_restore(flags);
 out:
-	atomic_dec(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
+	atomic_dec(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled);
 }
 
 static void __wakeup_reset(struct trace_array *tr)
@@ -523,7 +515,7 @@
 {
 	unsigned long flags;
 
-	tracing_reset_online_cpus(&tr->trace_buffer);
+	tracing_reset_online_cpus(&tr->array_buffer);
 
 	local_irq_save(flags);
 	arch_spin_lock(&wakeup_lock);
@@ -561,7 +553,7 @@
 		return;
 
 	pc = preempt_count();
-	disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
+	disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled);
 	if (unlikely(disabled != 1))
 		goto out;
@@ -589,14 +581,14 @@
 	else
 		tracing_dl = 0;
 
-	wakeup_task = p;
-	get_task_struct(wakeup_task);
+	wakeup_task = get_task_struct(p);
 
 	local_save_flags(flags);
 
-	data = per_cpu_ptr(wakeup_trace->trace_buffer.data, wakeup_cpu);
+	data = per_cpu_ptr(wakeup_trace->array_buffer.data, wakeup_cpu);
 	data->preempt_timestamp = ftrace_now(cpu);
 	tracing_sched_wakeup_trace(wakeup_trace, p, current, flags, pc);
+	__trace_stack(wakeup_trace, flags, 0, pc);
 
 	/*
 	 * We must be careful in using CALLER_ADDR2. But since wake_up
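
Note: get_task_struct() returns its argument in current kernels, so taking the reference and publishing the pointer collapse into one statement:

	/* before: assign, then pin the task */
	wakeup_task = p;
	get_task_struct(wakeup_task);

	/* after: get_task_struct() hands back the pointer it pinned */
	wakeup_task = get_task_struct(p);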
@@ -608,7 +600,7 @@
 out_locked:
 	arch_spin_unlock(&wakeup_lock);
 out:
-	atomic_dec(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
+	atomic_dec(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled);
 }
 
 static void start_wakeup_tracer(struct trace_array *tr)