@@ -35,25 +35,18 @@
 
 static void wakeup_reset(struct trace_array *tr);
 static void __wakeup_reset(struct trace_array *tr);
+static int start_func_tracer(struct trace_array *tr, int graph);
+static void stop_func_tracer(struct trace_array *tr, int graph);
 
 static int save_flags;
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-static int wakeup_display_graph(struct trace_array *tr, int set);
 # define is_graph(tr) ((tr)->trace_flags & TRACE_ITER_DISPLAY_GRAPH)
 #else
-static inline int wakeup_display_graph(struct trace_array *tr, int set)
-{
-	return 0;
-}
 # define is_graph(tr) false
 #endif
 
-
 #ifdef CONFIG_FUNCTION_TRACER
-
-static int wakeup_graph_entry(struct ftrace_graph_ent *trace);
-static void wakeup_graph_return(struct ftrace_graph_ret *trace);
 
 static bool function_enabled;
 
@@ -89,7 +82,7 @@
 	if (cpu != wakeup_current_cpu)
 		goto out_enable;
 
-	*data = per_cpu_ptr(tr->trace_buffer.data, cpu);
+	*data = per_cpu_ptr(tr->array_buffer.data, cpu);
 	disabled = atomic_inc_return(&(*data)->disabled);
 	if (unlikely(disabled != 1))
 		goto out;
@@ -104,122 +97,8 @@
 	return 0;
 }
 
-/*
- * wakeup uses its own tracer function to keep the overhead down:
- */
-static void
-wakeup_tracer_call(unsigned long ip, unsigned long parent_ip,
-		   struct ftrace_ops *op, struct pt_regs *pt_regs)
-{
-	struct trace_array *tr = wakeup_trace;
-	struct trace_array_cpu *data;
-	unsigned long flags;
-	int pc;
-
-	if (!func_prolog_preempt_disable(tr, &data, &pc))
-		return;
-
-	local_irq_save(flags);
-	trace_function(tr, ip, parent_ip, flags, pc);
-	local_irq_restore(flags);
-
-	atomic_dec(&data->disabled);
-	preempt_enable_notrace();
-}
-
-static int register_wakeup_function(struct trace_array *tr, int graph, int set)
-{
-	int ret;
-
-	/* 'set' is set if TRACE_ITER_FUNCTION is about to be set */
-	if (function_enabled || (!set && !(tr->trace_flags & TRACE_ITER_FUNCTION)))
-		return 0;
-
-	if (graph)
-		ret = register_ftrace_graph(&wakeup_graph_return,
-					    &wakeup_graph_entry);
-	else
-		ret = register_ftrace_function(tr->ops);
-
-	if (!ret)
-		function_enabled = true;
-
-	return ret;
-}
-
-static void unregister_wakeup_function(struct trace_array *tr, int graph)
-{
-	if (!function_enabled)
-		return;
-
-	if (graph)
-		unregister_ftrace_graph();
-	else
-		unregister_ftrace_function(tr->ops);
-
-	function_enabled = false;
-}
-
-static int wakeup_function_set(struct trace_array *tr, u32 mask, int set)
-{
-	if (!(mask & TRACE_ITER_FUNCTION))
-		return 0;
-
-	if (set)
-		register_wakeup_function(tr, is_graph(tr), 1);
-	else
-		unregister_wakeup_function(tr, is_graph(tr));
-	return 1;
-}
-#else
-static int register_wakeup_function(struct trace_array *tr, int graph, int set)
-{
-	return 0;
-}
-static void unregister_wakeup_function(struct trace_array *tr, int graph) { }
-static int wakeup_function_set(struct trace_array *tr, u32 mask, int set)
-{
-	return 0;
-}
-#endif /* CONFIG_FUNCTION_TRACER */
-
-static int wakeup_flag_changed(struct trace_array *tr, u32 mask, int set)
-{
-	struct tracer *tracer = tr->current_trace;
-
-	if (wakeup_function_set(tr, mask, set))
-		return 0;
-
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-	if (mask & TRACE_ITER_DISPLAY_GRAPH)
-		return wakeup_display_graph(tr, set);
-#endif
 
-	return trace_keep_overwrite(tracer, mask, set);
-}
-
-static int start_func_tracer(struct trace_array *tr, int graph)
-{
-	int ret;
-
-	ret = register_wakeup_function(tr, graph, 0);
-
-	if (!ret && tracing_is_enabled())
-		tracer_enabled = 1;
-	else
-		tracer_enabled = 0;
-
-	return ret;
-}
-
-static void stop_func_tracer(struct trace_array *tr, int graph)
-{
-	tracer_enabled = 0;
-
-	unregister_wakeup_function(tr, graph);
-}
-
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 static int wakeup_display_graph(struct trace_array *tr, int set)
 {
 	if (!(is_graph(tr) ^ set))
@@ -283,10 +162,17 @@
 	return;
 }
 
+static struct fgraph_ops fgraph_wakeup_ops = {
+	.entryfunc = &wakeup_graph_entry,
+	.retfunc = &wakeup_graph_return,
+};
+
 static void wakeup_trace_open(struct trace_iterator *iter)
 {
 	if (is_graph(iter->tr))
 		graph_trace_open(iter);
+	else
+		iter->private = NULL;
 }
 
 static void wakeup_trace_close(struct trace_iterator *iter)
@@ -296,8 +182,11 @@
 }
 
 #define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_PROC | \
-			    TRACE_GRAPH_PRINT_ABS_TIME | \
-			    TRACE_GRAPH_PRINT_DURATION)
+			    TRACE_GRAPH_PRINT_CPU | \
+			    TRACE_GRAPH_PRINT_REL_TIME | \
+			    TRACE_GRAPH_PRINT_DURATION | \
+			    TRACE_GRAPH_PRINT_OVERHEAD | \
+			    TRACE_GRAPH_PRINT_IRQS)
 
 static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
 {
@@ -318,6 +207,100 @@
 	else
 		trace_default_header(s);
 }
+#endif /* else CONFIG_FUNCTION_GRAPH_TRACER */
+
+/*
+ * wakeup uses its own tracer function to keep the overhead down:
+ */
+static void
+wakeup_tracer_call(unsigned long ip, unsigned long parent_ip,
+		   struct ftrace_ops *op, struct pt_regs *pt_regs)
+{
+	struct trace_array *tr = wakeup_trace;
+	struct trace_array_cpu *data;
+	unsigned long flags;
+	int pc;
+
+	if (!func_prolog_preempt_disable(tr, &data, &pc))
+		return;
+
+	local_irq_save(flags);
+	trace_function(tr, ip, parent_ip, flags, pc);
+	local_irq_restore(flags);
+
+	atomic_dec(&data->disabled);
+	preempt_enable_notrace();
+}
+
+static int register_wakeup_function(struct trace_array *tr, int graph, int set)
+{
+	int ret;
+
+	/* 'set' is set if TRACE_ITER_FUNCTION is about to be set */
+	if (function_enabled || (!set && !(tr->trace_flags & TRACE_ITER_FUNCTION)))
+		return 0;
+
+	if (graph)
+		ret = register_ftrace_graph(&fgraph_wakeup_ops);
+	else
+		ret = register_ftrace_function(tr->ops);
+
+	if (!ret)
+		function_enabled = true;
+
+	return ret;
+}
+
+static void unregister_wakeup_function(struct trace_array *tr, int graph)
+{
+	if (!function_enabled)
+		return;
+
+	if (graph)
+		unregister_ftrace_graph(&fgraph_wakeup_ops);
+	else
+		unregister_ftrace_function(tr->ops);
+
+	function_enabled = false;
+}
+
+static int wakeup_function_set(struct trace_array *tr, u32 mask, int set)
+{
+	if (!(mask & TRACE_ITER_FUNCTION))
+		return 0;
+
+	if (set)
+		register_wakeup_function(tr, is_graph(tr), 1);
+	else
+		unregister_wakeup_function(tr, is_graph(tr));
+	return 1;
+}
+#else /* CONFIG_FUNCTION_TRACER */
+static int register_wakeup_function(struct trace_array *tr, int graph, int set)
+{
+	return 0;
+}
+static void unregister_wakeup_function(struct trace_array *tr, int graph) { }
+static int wakeup_function_set(struct trace_array *tr, u32 mask, int set)
+{
+	return 0;
+}
+#endif /* else CONFIG_FUNCTION_TRACER */
+
+#ifndef CONFIG_FUNCTION_GRAPH_TRACER
+static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
+{
+	return TRACE_TYPE_UNHANDLED;
+}
+
+static void wakeup_trace_open(struct trace_iterator *iter) { }
+static void wakeup_trace_close(struct trace_iterator *iter) { }
+
+static void wakeup_print_header(struct seq_file *s)
+{
+	trace_default_header(s);
+}
+#endif /* !CONFIG_FUNCTION_GRAPH_TRACER */
 
 static void
 __trace_function(struct trace_array *tr,
@@ -329,34 +312,42 @@
 	else
 		trace_function(tr, ip, parent_ip, flags, pc);
 }
-#else
-#define __trace_function trace_function
 
-static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
+static int wakeup_flag_changed(struct trace_array *tr, u32 mask, int set)
 {
-	return TRACE_TYPE_UNHANDLED;
+	struct tracer *tracer = tr->current_trace;
+
+	if (wakeup_function_set(tr, mask, set))
+		return 0;
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	if (mask & TRACE_ITER_DISPLAY_GRAPH)
+		return wakeup_display_graph(tr, set);
+#endif
+
+	return trace_keep_overwrite(tracer, mask, set);
 }
 
-static void wakeup_trace_open(struct trace_iterator *iter) { }
-static void wakeup_trace_close(struct trace_iterator *iter) { }
+static int start_func_tracer(struct trace_array *tr, int graph)
+{
+	int ret;
 
-#ifdef CONFIG_FUNCTION_TRACER
-static int wakeup_graph_entry(struct ftrace_graph_ent *trace)
-{
-	return -1;
+	ret = register_wakeup_function(tr, graph, 0);
+
+	if (!ret && tracing_is_enabled())
+		tracer_enabled = 1;
+	else
+		tracer_enabled = 0;
+
+	return ret;
 }
-static void wakeup_graph_return(struct ftrace_graph_ret *trace) { }
-static void wakeup_print_header(struct seq_file *s)
+
+static void stop_func_tracer(struct trace_array *tr, int graph)
 {
-	trace_default_header(s);
+	tracer_enabled = 0;
+
+	unregister_wakeup_function(tr, graph);
 }
-#else
-static void wakeup_print_header(struct seq_file *s)
-{
-	trace_latency_header(s);
-}
-#endif /* CONFIG_FUNCTION_TRACER */
-#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 
 /*
  * Should this new latency be reported/recorded?
@@ -389,7 +380,7 @@
 				   unsigned long flags, int pc)
 {
 	struct trace_event_call *call = &event_context_switch;
-	struct ring_buffer *buffer = tr->trace_buffer.buffer;
+	struct trace_buffer *buffer = tr->array_buffer.buffer;
 	struct ring_buffer_event *event;
 	struct ctx_switch_entry *entry;
 
@@ -419,7 +410,7 @@
 	struct trace_event_call *call = &event_wakeup;
 	struct ring_buffer_event *event;
 	struct ctx_switch_entry *entry;
-	struct ring_buffer *buffer = tr->trace_buffer.buffer;
+	struct trace_buffer *buffer = tr->array_buffer.buffer;
 
 	event = trace_buffer_lock_reserve(buffer, TRACE_WAKE,
 					  sizeof(*entry), flags, pc);
@@ -470,6 +461,6 @@
 
 	/* disable local data, not wakeup_cpu data */
 	cpu = raw_smp_processor_id();
-	disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
+	disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled);
 	if (likely(disabled != 1))
 		goto out;
476 | 467 | |
---|
.. | .. |
---|
482 | 473 | goto out_unlock; |
---|
483 | 474 | |
---|
484 | 475 | /* The task we are waiting for is waking up */ |
---|
485 | | - data = per_cpu_ptr(wakeup_trace->trace_buffer.data, wakeup_cpu); |
---|
| 476 | + data = per_cpu_ptr(wakeup_trace->array_buffer.data, wakeup_cpu); |
---|
486 | 477 | |
---|
487 | 478 | __trace_function(wakeup_trace, CALLER_ADDR0, CALLER_ADDR1, flags, pc); |
---|
488 | 479 | tracing_sched_switch_trace(wakeup_trace, prev, next, flags, pc); |
---|
| 480 | + __trace_stack(wakeup_trace, flags, 0, pc); |
---|
489 | 481 | |
---|
490 | 482 | T0 = data->preempt_timestamp; |
---|
491 | 483 | T1 = ftrace_now(cpu); |
---|
@@ -496,7 +488,7 @@
 
 	if (likely(!is_tracing_stopped())) {
 		wakeup_trace->max_latency = delta;
-		update_max_tr(wakeup_trace, wakeup_task, wakeup_cpu);
+		update_max_tr(wakeup_trace, wakeup_task, wakeup_cpu, NULL);
 	}
 
 out_unlock:
@@ -504,7 +496,7 @@
 	arch_spin_unlock(&wakeup_lock);
 	local_irq_restore(flags);
 out:
-	atomic_dec(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
+	atomic_dec(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled);
 }
 
 static void __wakeup_reset(struct trace_array *tr)
@@ -523,7 +515,7 @@
 {
 	unsigned long flags;
 
-	tracing_reset_online_cpus(&tr->trace_buffer);
+	tracing_reset_online_cpus(&tr->array_buffer);
 
 	local_irq_save(flags);
 	arch_spin_lock(&wakeup_lock);
@@ -561,7 +553,7 @@
 		return;
 
 	pc = preempt_count();
-	disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
+	disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled);
 	if (unlikely(disabled != 1))
 		goto out;
 
@@ -589,14 +581,14 @@
 	else
 		tracing_dl = 0;
 
-	wakeup_task = p;
-	get_task_struct(wakeup_task);
+	wakeup_task = get_task_struct(p);
 
 	local_save_flags(flags);
 
-	data = per_cpu_ptr(wakeup_trace->trace_buffer.data, wakeup_cpu);
+	data = per_cpu_ptr(wakeup_trace->array_buffer.data, wakeup_cpu);
 	data->preempt_timestamp = ftrace_now(cpu);
 	tracing_sched_wakeup_trace(wakeup_trace, p, current, flags, pc);
+	__trace_stack(wakeup_trace, flags, 0, pc);
 
 	/*
 	 * We must be careful in using CALLER_ADDR2. But since wake_up
@@ -608,7 +600,7 @@
 out_locked:
 	arch_spin_unlock(&wakeup_lock);
 out:
-	atomic_dec(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
+	atomic_dec(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled);
 }
 
 static void start_wakeup_tracer(struct trace_array *tr)
---|