forked from ~ljy/RK356X_SDK_RELEASE

hc
2023-12-11 072de836f53be56a70cecf70b43ae43b7ce17376
kernel/kernel/trace/trace_sched_wakeup.c
@@ -35,25 +35,18 @@
 
 static void wakeup_reset(struct trace_array *tr);
 static void __wakeup_reset(struct trace_array *tr);
+static int start_func_tracer(struct trace_array *tr, int graph);
+static void stop_func_tracer(struct trace_array *tr, int graph);
 
 static int save_flags;
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-static int wakeup_display_graph(struct trace_array *tr, int set);
 # define is_graph(tr) ((tr)->trace_flags & TRACE_ITER_DISPLAY_GRAPH)
 #else
-static inline int wakeup_display_graph(struct trace_array *tr, int set)
-{
-	return 0;
-}
 # define is_graph(tr) false
 #endif
 
-
 #ifdef CONFIG_FUNCTION_TRACER
-
-static int wakeup_graph_entry(struct ftrace_graph_ent *trace);
-static void wakeup_graph_return(struct ftrace_graph_ret *trace);
 
 static bool function_enabled;
 
@@ -89,7 +82,7 @@
 	if (cpu != wakeup_current_cpu)
 		goto out_enable;
 
-	*data = per_cpu_ptr(tr->trace_buffer.data, cpu);
+	*data = per_cpu_ptr(tr->array_buffer.data, cpu);
 	disabled = atomic_inc_return(&(*data)->disabled);
 	if (unlikely(disabled != 1))
 		goto out;
@@ -104,122 +97,8 @@
 	return 0;
 }
 
-/*
- * wakeup uses its own tracer function to keep the overhead down:
- */
-static void
-wakeup_tracer_call(unsigned long ip, unsigned long parent_ip,
-		   struct ftrace_ops *op, struct pt_regs *pt_regs)
-{
-	struct trace_array *tr = wakeup_trace;
-	struct trace_array_cpu *data;
-	unsigned long flags;
-	int pc;
-
-	if (!func_prolog_preempt_disable(tr, &data, &pc))
-		return;
-
-	local_irq_save(flags);
-	trace_function(tr, ip, parent_ip, flags, pc);
-	local_irq_restore(flags);
-
-	atomic_dec(&data->disabled);
-	preempt_enable_notrace();
-}
-
-static int register_wakeup_function(struct trace_array *tr, int graph, int set)
-{
-	int ret;
-
-	/* 'set' is set if TRACE_ITER_FUNCTION is about to be set */
-	if (function_enabled || (!set && !(tr->trace_flags & TRACE_ITER_FUNCTION)))
-		return 0;
-
-	if (graph)
-		ret = register_ftrace_graph(&wakeup_graph_return,
-					    &wakeup_graph_entry);
-	else
-		ret = register_ftrace_function(tr->ops);
-
-	if (!ret)
-		function_enabled = true;
-
-	return ret;
-}
-
-static void unregister_wakeup_function(struct trace_array *tr, int graph)
-{
-	if (!function_enabled)
-		return;
-
-	if (graph)
-		unregister_ftrace_graph();
-	else
-		unregister_ftrace_function(tr->ops);
-
-	function_enabled = false;
-}
-
-static int wakeup_function_set(struct trace_array *tr, u32 mask, int set)
-{
-	if (!(mask & TRACE_ITER_FUNCTION))
-		return 0;
-
-	if (set)
-		register_wakeup_function(tr, is_graph(tr), 1);
-	else
-		unregister_wakeup_function(tr, is_graph(tr));
-	return 1;
-}
-#else
-static int register_wakeup_function(struct trace_array *tr, int graph, int set)
-{
-	return 0;
-}
-static void unregister_wakeup_function(struct trace_array *tr, int graph) { }
-static int wakeup_function_set(struct trace_array *tr, u32 mask, int set)
-{
-	return 0;
-}
-#endif /* CONFIG_FUNCTION_TRACER */
-
-static int wakeup_flag_changed(struct trace_array *tr, u32 mask, int set)
-{
-	struct tracer *tracer = tr->current_trace;
-
-	if (wakeup_function_set(tr, mask, set))
-		return 0;
-
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-	if (mask & TRACE_ITER_DISPLAY_GRAPH)
-		return wakeup_display_graph(tr, set);
-#endif
 
-	return trace_keep_overwrite(tracer, mask, set);
-}
-
-static int start_func_tracer(struct trace_array *tr, int graph)
-{
-	int ret;
-
-	ret = register_wakeup_function(tr, graph, 0);
-
-	if (!ret && tracing_is_enabled())
-		tracer_enabled = 1;
-	else
-		tracer_enabled = 0;
-
-	return ret;
-}
-
-static void stop_func_tracer(struct trace_array *tr, int graph)
-{
-	tracer_enabled = 0;
-
-	unregister_wakeup_function(tr, graph);
-}
-
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 static int wakeup_display_graph(struct trace_array *tr, int set)
 {
 	if (!(is_graph(tr) ^ set))
@@ -283,6 +162,11 @@
 	return;
 }
 
+static struct fgraph_ops fgraph_wakeup_ops = {
+	.entryfunc = &wakeup_graph_entry,
+	.retfunc = &wakeup_graph_return,
+};
+
 static void wakeup_trace_open(struct trace_iterator *iter)
 {
 	if (is_graph(iter->tr))
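
Note: this hunk, together with the register/unregister hunks later in the
patch, moves the wakeup tracer to the function-graph API that bundles the
entry and return callbacks into a struct fgraph_ops and passes that single
pointer to register_ftrace_graph()/unregister_ftrace_graph(). A minimal
sketch of the pattern, using hypothetical my_* names that are not part of
this file:

	static int my_graph_entry(struct ftrace_graph_ent *trace)
	{
		return 1;	/* non-zero: also trace this function's return */
	}

	static void my_graph_return(struct ftrace_graph_ret *trace)
	{
	}

	static struct fgraph_ops my_fgraph_ops = {
		.entryfunc	= my_graph_entry,
		.retfunc	= my_graph_return,
	};

	/* register_ftrace_graph(&my_fgraph_ops); ...
	   unregister_ftrace_graph(&my_fgraph_ops); */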
@@ -296,8 +180,11 @@
 }
 
 #define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_PROC | \
-			    TRACE_GRAPH_PRINT_ABS_TIME | \
-			    TRACE_GRAPH_PRINT_DURATION)
+			    TRACE_GRAPH_PRINT_CPU | \
+			    TRACE_GRAPH_PRINT_REL_TIME | \
+			    TRACE_GRAPH_PRINT_DURATION | \
+			    TRACE_GRAPH_PRINT_OVERHEAD | \
+			    TRACE_GRAPH_PRINT_IRQS)
 
 static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
 {
@@ -318,6 +205,100 @@
 	else
 		trace_default_header(s);
 }
+#endif /* else CONFIG_FUNCTION_GRAPH_TRACER */
+
+/*
+ * wakeup uses its own tracer function to keep the overhead down:
+ */
+static void
+wakeup_tracer_call(unsigned long ip, unsigned long parent_ip,
+		   struct ftrace_ops *op, struct pt_regs *pt_regs)
+{
+	struct trace_array *tr = wakeup_trace;
+	struct trace_array_cpu *data;
+	unsigned long flags;
+	int pc;
+
+	if (!func_prolog_preempt_disable(tr, &data, &pc))
+		return;
+
+	local_irq_save(flags);
+	trace_function(tr, ip, parent_ip, flags, pc);
+	local_irq_restore(flags);
+
+	atomic_dec(&data->disabled);
+	preempt_enable_notrace();
+}
+
+static int register_wakeup_function(struct trace_array *tr, int graph, int set)
+{
+	int ret;
+
+	/* 'set' is set if TRACE_ITER_FUNCTION is about to be set */
+	if (function_enabled || (!set && !(tr->trace_flags & TRACE_ITER_FUNCTION)))
+		return 0;
+
+	if (graph)
+		ret = register_ftrace_graph(&fgraph_wakeup_ops);
+	else
+		ret = register_ftrace_function(tr->ops);
+
+	if (!ret)
+		function_enabled = true;
+
+	return ret;
+}
+
+static void unregister_wakeup_function(struct trace_array *tr, int graph)
+{
+	if (!function_enabled)
+		return;
+
+	if (graph)
+		unregister_ftrace_graph(&fgraph_wakeup_ops);
+	else
+		unregister_ftrace_function(tr->ops);
+
+	function_enabled = false;
+}
+
+static int wakeup_function_set(struct trace_array *tr, u32 mask, int set)
+{
+	if (!(mask & TRACE_ITER_FUNCTION))
+		return 0;
+
+	if (set)
+		register_wakeup_function(tr, is_graph(tr), 1);
+	else
+		unregister_wakeup_function(tr, is_graph(tr));
+	return 1;
+}
+#else /* CONFIG_FUNCTION_TRACER */
+static int register_wakeup_function(struct trace_array *tr, int graph, int set)
+{
+	return 0;
+}
+static void unregister_wakeup_function(struct trace_array *tr, int graph) { }
+static int wakeup_function_set(struct trace_array *tr, u32 mask, int set)
+{
+	return 0;
+}
+#endif /* else CONFIG_FUNCTION_TRACER */
+
+#ifndef CONFIG_FUNCTION_GRAPH_TRACER
+static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
+{
+	return TRACE_TYPE_UNHANDLED;
+}
+
+static void wakeup_trace_open(struct trace_iterator *iter) { }
+static void wakeup_trace_close(struct trace_iterator *iter) { }
+
+static void wakeup_print_header(struct seq_file *s)
+{
+	trace_default_header(s);
+}
+#endif /* !CONFIG_FUNCTION_GRAPH_TRACER */
 
 static void
 __trace_function(struct trace_array *tr,
@@ -329,34 +310,42 @@
 	else
 		trace_function(tr, ip, parent_ip, flags, pc);
 }
-#else
-#define __trace_function trace_function
 
-static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
+static int wakeup_flag_changed(struct trace_array *tr, u32 mask, int set)
 {
-	return TRACE_TYPE_UNHANDLED;
+	struct tracer *tracer = tr->current_trace;
+
+	if (wakeup_function_set(tr, mask, set))
+		return 0;
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	if (mask & TRACE_ITER_DISPLAY_GRAPH)
+		return wakeup_display_graph(tr, set);
+#endif
+
+	return trace_keep_overwrite(tracer, mask, set);
 }
 
-static void wakeup_trace_open(struct trace_iterator *iter) { }
-static void wakeup_trace_close(struct trace_iterator *iter) { }
+static int start_func_tracer(struct trace_array *tr, int graph)
+{
+	int ret;
 
-#ifdef CONFIG_FUNCTION_TRACER
-static int wakeup_graph_entry(struct ftrace_graph_ent *trace)
-{
-	return -1;
+	ret = register_wakeup_function(tr, graph, 0);
+
+	if (!ret && tracing_is_enabled())
+		tracer_enabled = 1;
+	else
+		tracer_enabled = 0;
+
+	return ret;
 }
-static void wakeup_graph_return(struct ftrace_graph_ret *trace) { }
-static void wakeup_print_header(struct seq_file *s)
+
+static void stop_func_tracer(struct trace_array *tr, int graph)
 {
-	trace_default_header(s);
+	tracer_enabled = 0;
+
+	unregister_wakeup_function(tr, graph);
 }
-#else
-static void wakeup_print_header(struct seq_file *s)
-{
-	trace_latency_header(s);
-}
-#endif /* CONFIG_FUNCTION_TRACER */
-#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 
 /*
  * Should this new latency be reported/recorded?
@@ -389,7 +378,7 @@
 			   unsigned long flags, int pc)
 {
 	struct trace_event_call *call = &event_context_switch;
-	struct ring_buffer *buffer = tr->trace_buffer.buffer;
+	struct trace_buffer *buffer = tr->array_buffer.buffer;
 	struct ring_buffer_event *event;
 	struct ctx_switch_entry *entry;
 
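
Note: this hunk and the similar ones below follow the buffer renaming that
runs through the whole patch: the per-trace-array buffer is reached via
tr->array_buffer (previously tr->trace_buffer) and the buffer handle type
is struct trace_buffer (previously struct ring_buffer). An illustrative
sketch of the reserve path under the new names; example_reserve is a
hypothetical helper, not part of this file:

	static void example_reserve(struct trace_array *tr,
				    unsigned long flags, int pc)
	{
		struct trace_buffer *buffer = tr->array_buffer.buffer;
		struct ring_buffer_event *event;

		event = trace_buffer_lock_reserve(buffer, TRACE_WAKE,
						  sizeof(struct ctx_switch_entry),
						  flags, pc);
		if (!event)
			return;
		/* ... fill the ctx_switch_entry and commit the event ... */
	}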
@@ -419,7 +408,7 @@
 	struct trace_event_call *call = &event_wakeup;
 	struct ring_buffer_event *event;
 	struct ctx_switch_entry *entry;
-	struct ring_buffer *buffer = tr->trace_buffer.buffer;
+	struct trace_buffer *buffer = tr->array_buffer.buffer;
 
 	event = trace_buffer_lock_reserve(buffer, TRACE_WAKE,
 					  sizeof(*entry), flags, pc);
@@ -470,7 +459,7 @@
 
 	/* disable local data, not wakeup_cpu data */
 	cpu = raw_smp_processor_id();
-	disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
+	disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled);
 	if (likely(disabled != 1))
 		goto out;
 
@@ -482,10 +471,11 @@
 		goto out_unlock;
 
 	/* The task we are waiting for is waking up */
-	data = per_cpu_ptr(wakeup_trace->trace_buffer.data, wakeup_cpu);
+	data = per_cpu_ptr(wakeup_trace->array_buffer.data, wakeup_cpu);
 
 	__trace_function(wakeup_trace, CALLER_ADDR0, CALLER_ADDR1, flags, pc);
 	tracing_sched_switch_trace(wakeup_trace, prev, next, flags, pc);
+	__trace_stack(wakeup_trace, flags, 0, pc);
 
 	T0 = data->preempt_timestamp;
 	T1 = ftrace_now(cpu);
@@ -496,7 +486,7 @@
 
 	if (likely(!is_tracing_stopped())) {
 		wakeup_trace->max_latency = delta;
-		update_max_tr(wakeup_trace, wakeup_task, wakeup_cpu);
+		update_max_tr(wakeup_trace, wakeup_task, wakeup_cpu, NULL);
 	}
 
 out_unlock:
@@ -504,7 +494,7 @@
 	arch_spin_unlock(&wakeup_lock);
 	local_irq_restore(flags);
 out:
-	atomic_dec(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
+	atomic_dec(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled);
 }
 
 static void __wakeup_reset(struct trace_array *tr)
@@ -523,7 +513,7 @@
 {
 	unsigned long flags;
 
-	tracing_reset_online_cpus(&tr->trace_buffer);
+	tracing_reset_online_cpus(&tr->array_buffer);
 
 	local_irq_save(flags);
 	arch_spin_lock(&wakeup_lock);
@@ -561,7 +551,7 @@
 		return;
 
 	pc = preempt_count();
-	disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
+	disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled);
 	if (unlikely(disabled != 1))
 		goto out;
 
@@ -589,14 +579,14 @@
 	else
 		tracing_dl = 0;
 
-	wakeup_task = p;
-	get_task_struct(wakeup_task);
+	wakeup_task = get_task_struct(p);
 
 	local_save_flags(flags);
 
-	data = per_cpu_ptr(wakeup_trace->trace_buffer.data, wakeup_cpu);
+	data = per_cpu_ptr(wakeup_trace->array_buffer.data, wakeup_cpu);
 	data->preempt_timestamp = ftrace_now(cpu);
 	tracing_sched_wakeup_trace(wakeup_trace, p, current, flags, pc);
+	__trace_stack(wakeup_trace, flags, 0, pc);
 
 	/*
 	 * We must be careful in using CALLER_ADDR2. But since wake_up
@@ -608,7 +598,7 @@
 out_locked:
 	arch_spin_unlock(&wakeup_lock);
 out:
-	atomic_dec(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
+	atomic_dec(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled);
 }
 
 static void start_wakeup_tracer(struct trace_array *tr)