.. | ..
35 | 35 |
36 | 36 | static void wakeup_reset(struct trace_array *tr);
37 | 37 | static void __wakeup_reset(struct trace_array *tr);
| 38 | +static int start_func_tracer(struct trace_array *tr, int graph);
| 39 | +static void stop_func_tracer(struct trace_array *tr, int graph);
38 | 40 |
39 | 41 | static int save_flags;
40 | 42 |
41 | 43 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER
42 | | -static int wakeup_display_graph(struct trace_array *tr, int set);
43 | 44 | # define is_graph(tr) ((tr)->trace_flags & TRACE_ITER_DISPLAY_GRAPH)
44 | 45 | #else
45 | | -static inline int wakeup_display_graph(struct trace_array *tr, int set)
46 | | -{
47 | | -        return 0;
48 | | -}
49 | 46 | # define is_graph(tr) false
50 | 47 | #endif
51 | 48 |
52 | | -
53 | 49 | #ifdef CONFIG_FUNCTION_TRACER
54 | | -
55 | | -static int wakeup_graph_entry(struct ftrace_graph_ent *trace);
56 | | -static void wakeup_graph_return(struct ftrace_graph_ret *trace);
57 | 50 |
58 | 51 | static bool function_enabled;
59 | 52 |
.. | ..
74 | 67 | static int
75 | 68 | func_prolog_preempt_disable(struct trace_array *tr,
76 | 69 |                             struct trace_array_cpu **data,
77 | | -                            int *pc)
| 70 | +                            unsigned int *trace_ctx)
78 | 71 | {
79 | 72 |         long disabled;
80 | 73 |         int cpu;
.. | ..
82 | 75 |         if (likely(!wakeup_task))
83 | 76 |                 return 0;
84 | 77 |
85 | | -        *pc = preempt_count();
| 78 | +        *trace_ctx = tracing_gen_ctx();
86 | 79 |         preempt_disable_notrace();
87 | 80 |
88 | 81 |         cpu = raw_smp_processor_id();
89 | 82 |         if (cpu != wakeup_current_cpu)
90 | 83 |                 goto out_enable;
91 | 84 |
92 | | -        *data = per_cpu_ptr(tr->trace_buffer.data, cpu);
| 85 | +        *data = per_cpu_ptr(tr->array_buffer.data, cpu);
93 | 86 |         disabled = atomic_inc_return(&(*data)->disabled);
94 | 87 |         if (unlikely(disabled != 1))
95 | 88 |                 goto out;
.. | ..
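The prologue change above is the core idea repeated throughout this patch: rather than threading a separate `flags`/`pc` pair through every helper, each probe now captures one packed `trace_ctx` word via `tracing_gen_ctx()` and passes that along. A minimal, self-contained sketch of the packing idea follows; the bit layout, masks, and helper names here are illustrative assumptions, not the kernel's actual encoding.

```c
#include <stdio.h>

/* Hypothetical bit layout for a packed trace-context word. */
#define CTX_PREEMPT_MASK 0xffu      /* low byte: preempt count */
#define CTX_IRQS_OFF     (1u << 8)  /* example context-flag bits */
#define CTX_HARDIRQ      (1u << 9)

/* Pack everything once at the probe site... */
static unsigned int gen_ctx(unsigned int preempt, int irqs_off, int hardirq)
{
	unsigned int ctx = preempt & CTX_PREEMPT_MASK;

	if (irqs_off)
		ctx |= CTX_IRQS_OFF;
	if (hardirq)
		ctx |= CTX_HARDIRQ;
	return ctx;
}

/* ...so every consumer unpacks what it needs from a single argument. */
int main(void)
{
	unsigned int ctx = gen_ctx(2, 1, 0);

	printf("preempt=%u irqs_off=%d\n",
	       ctx & CTX_PREEMPT_MASK, !!(ctx & CTX_IRQS_OFF));
	return 0;
}
```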
104 | 97 |         return 0;
105 | 98 | }
106 | 99 |
| 100 | +#ifdef CONFIG_FUNCTION_GRAPH_TRACER
| 101 | +
| 102 | +static int wakeup_display_graph(struct trace_array *tr, int set)
| 103 | +{
| 104 | +        if (!(is_graph(tr) ^ set))
| 105 | +                return 0;
| 106 | +
| 107 | +        stop_func_tracer(tr, !set);
| 108 | +
| 109 | +        wakeup_reset(wakeup_trace);
| 110 | +        tr->max_latency = 0;
| 111 | +
| 112 | +        return start_func_tracer(tr, set);
| 113 | +}
| 114 | +
| 115 | +static int wakeup_graph_entry(struct ftrace_graph_ent *trace)
| 116 | +{
| 117 | +        struct trace_array *tr = wakeup_trace;
| 118 | +        struct trace_array_cpu *data;
| 119 | +        unsigned int trace_ctx;
| 120 | +        int ret = 0;
| 121 | +
| 122 | +        if (ftrace_graph_ignore_func(trace))
| 123 | +                return 0;
| 124 | +        /*
| 125 | +         * Do not trace a function if it's filtered by set_graph_notrace.
| 126 | +         * Make the index of ret stack negative to indicate that it should
| 127 | +         * ignore further functions. But it needs its own ret stack entry
| 128 | +         * to recover the original index in order to continue tracing after
| 129 | +         * returning from the function.
| 130 | +         */
| 131 | +        if (ftrace_graph_notrace_addr(trace->func))
| 132 | +                return 1;
| 133 | +
| 134 | +        if (!func_prolog_preempt_disable(tr, &data, &trace_ctx))
| 135 | +                return 0;
| 136 | +
| 137 | +        ret = __trace_graph_entry(tr, trace, trace_ctx);
| 138 | +        atomic_dec(&data->disabled);
| 139 | +        preempt_enable_notrace();
| 140 | +
| 141 | +        return ret;
| 142 | +}
| 143 | +
| 144 | +static void wakeup_graph_return(struct ftrace_graph_ret *trace)
| 145 | +{
| 146 | +        struct trace_array *tr = wakeup_trace;
| 147 | +        struct trace_array_cpu *data;
| 148 | +        unsigned int trace_ctx;
| 149 | +
| 150 | +        ftrace_graph_addr_finish(trace);
| 151 | +
| 152 | +        if (!func_prolog_preempt_disable(tr, &data, &trace_ctx))
| 153 | +                return;
| 154 | +
| 155 | +        __trace_graph_return(tr, trace, trace_ctx);
| 156 | +        atomic_dec(&data->disabled);
| 157 | +
| 158 | +        preempt_enable_notrace();
| 159 | +        return;
| 160 | +}
| 161 | +
| 162 | +static struct fgraph_ops fgraph_wakeup_ops = {
| 163 | +        .entryfunc = &wakeup_graph_entry,
| 164 | +        .retfunc = &wakeup_graph_return,
| 165 | +};
| 166 | +
| 167 | +static void wakeup_trace_open(struct trace_iterator *iter)
| 168 | +{
| 169 | +        if (is_graph(iter->tr))
| 170 | +                graph_trace_open(iter);
| 171 | +}
| 172 | +
| 173 | +static void wakeup_trace_close(struct trace_iterator *iter)
| 174 | +{
| 175 | +        if (iter->private)
| 176 | +                graph_trace_close(iter);
| 177 | +}
| 178 | +
| 179 | +#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_PROC | \
| 180 | +                            TRACE_GRAPH_PRINT_CPU | \
| 181 | +                            TRACE_GRAPH_PRINT_REL_TIME | \
| 182 | +                            TRACE_GRAPH_PRINT_DURATION | \
| 183 | +                            TRACE_GRAPH_PRINT_OVERHEAD | \
| 184 | +                            TRACE_GRAPH_PRINT_IRQS)
| 185 | +
| 186 | +static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
| 187 | +{
| 188 | +        /*
| 189 | +         * In graph mode call the graph tracer output function,
| 190 | +         * otherwise go with the TRACE_FN event handler
| 191 | +         */
| 192 | +        if (is_graph(iter->tr))
| 193 | +                return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);
| 194 | +
| 195 | +        return TRACE_TYPE_UNHANDLED;
| 196 | +}
| 197 | +
| 198 | +static void wakeup_print_header(struct seq_file *s)
| 199 | +{
| 200 | +        if (is_graph(wakeup_trace))
| 201 | +                print_graph_headers_flags(s, GRAPH_TRACER_FLAGS);
| 202 | +        else
| 203 | +                trace_default_header(s);
| 204 | +}
| 205 | +#endif /* else CONFIG_FUNCTION_GRAPH_TRACER */
| 206 | +
107 | 207 | /*
108 | 208 |  * wakeup uses its own tracer function to keep the overhead down:
109 | 209 |  */
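The hunk above also switches the function-graph callbacks to the `fgraph_ops` interface: instead of registering two bare function pointers, the entry and return handlers are bundled into one ops structure, which lets the fgraph core identify the registered user again on unregister. A condensed in-kernel-style sketch of the calling convention, using only identifiers that appear in this diff plus two hypothetical wrapper functions (not compilable outside the kernel tree):

```c
/* Old convention (removed further down): two bare callbacks.
 *
 *	ret = register_ftrace_graph(&wakeup_graph_return,
 *				    &wakeup_graph_entry);
 *	...
 *	unregister_ftrace_graph();
 */

/* New convention: the callbacks travel together in an ops struct. */
static struct fgraph_ops fgraph_wakeup_ops = {
	.entryfunc = &wakeup_graph_entry,
	.retfunc = &wakeup_graph_return,
};

static int wakeup_graph_start(void)	/* hypothetical wrapper */
{
	return register_ftrace_graph(&fgraph_wakeup_ops);
}

static void wakeup_graph_stop(void)	/* hypothetical wrapper */
{
	unregister_ftrace_graph(&fgraph_wakeup_ops);
}
```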
.. | ..
114 | 214 |         struct trace_array *tr = wakeup_trace;
115 | 215 |         struct trace_array_cpu *data;
116 | 216 |         unsigned long flags;
117 | | -        int pc;
| 217 | +        unsigned int trace_ctx;
118 | 218 |
119 | | -        if (!func_prolog_preempt_disable(tr, &data, &pc))
| 219 | +        if (!func_prolog_preempt_disable(tr, &data, &trace_ctx))
120 | 220 |                 return;
121 | 221 |
122 | 222 |         local_irq_save(flags);
123 | | -        trace_function(tr, ip, parent_ip, flags, pc);
| 223 | +        trace_function(tr, ip, parent_ip, trace_ctx);
124 | 224 |         local_irq_restore(flags);
125 | 225 |
126 | 226 |         atomic_dec(&data->disabled);
.. | ..
136 | 236 |                 return 0;
137 | 237 |
138 | 238 |         if (graph)
139 | | -                ret = register_ftrace_graph(&wakeup_graph_return,
140 | | -                                            &wakeup_graph_entry);
| 239 | +                ret = register_ftrace_graph(&fgraph_wakeup_ops);
141 | 240 |         else
142 | 241 |                 ret = register_ftrace_function(tr->ops);
143 | 242 |
.. | ..
153 | 252 |                 return;
154 | 253 |
155 | 254 |         if (graph)
156 | | -                unregister_ftrace_graph();
| 255 | +                unregister_ftrace_graph(&fgraph_wakeup_ops);
157 | 256 |         else
158 | 257 |                 unregister_ftrace_function(tr->ops);
159 | 258 |
.. | ..
171 | 270 |         unregister_wakeup_function(tr, is_graph(tr));
172 | 271 |         return 1;
173 | 272 | }
174 | | -#else
| 273 | +#else /* CONFIG_FUNCTION_TRACER */
175 | 274 | static int register_wakeup_function(struct trace_array *tr, int graph, int set)
176 | 275 | {
177 | 276 |         return 0;
.. | ..
181 | 280 | {
182 | 281 |         return 0;
183 | 282 | }
184 | | -#endif /* CONFIG_FUNCTION_TRACER */
| 283 | +#endif /* else CONFIG_FUNCTION_TRACER */
| 284 | +
| 285 | +#ifndef CONFIG_FUNCTION_GRAPH_TRACER
| 286 | +static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
| 287 | +{
| 288 | +        return TRACE_TYPE_UNHANDLED;
| 289 | +}
| 290 | +
| 291 | +static void wakeup_trace_open(struct trace_iterator *iter) { }
| 292 | +static void wakeup_trace_close(struct trace_iterator *iter) { }
| 293 | +
| 294 | +static void wakeup_print_header(struct seq_file *s)
| 295 | +{
| 296 | +        trace_default_header(s);
| 297 | +}
| 298 | +#endif /* !CONFIG_FUNCTION_GRAPH_TRACER */
| 299 | +
| 300 | +static void
| 301 | +__trace_function(struct trace_array *tr,
| 302 | +                 unsigned long ip, unsigned long parent_ip,
| 303 | +                 unsigned int trace_ctx)
| 304 | +{
| 305 | +        if (is_graph(tr))
| 306 | +                trace_graph_function(tr, ip, parent_ip, trace_ctx);
| 307 | +        else
| 308 | +                trace_function(tr, ip, parent_ip, trace_ctx);
| 309 | +}
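With plain stubs now provided under `#ifndef CONFIG_FUNCTION_GRAPH_TRACER`, the new `__trace_function()` can sit outside the `#ifdef` maze: when the graph tracer is configured out, `is_graph(tr)` is the compile-time constant `false` (see the `#define` at the top of this diff), so the graph branch is dead code the compiler removes. A standalone sketch of that constant-folding pattern (names here are illustrative):

```c
#include <stdbool.h>
#include <stdio.h>

/* Mirror the diff's trick: one configuration gets a real test,
 * the other a compile-time constant. */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
# define is_graph(tr) ((tr)->graph_enabled)
#else
# define is_graph(tr) false
#endif

struct trace_array { bool graph_enabled; };

static void trace_plain(const char *fn) { printf("fn:    %s\n", fn); }
static void trace_graph(const char *fn) { printf("graph: %s\n", fn); }

/* One body serves both configurations; when is_graph() folds to
 * false, the graph branch is eliminated entirely. */
static void __trace_function(struct trace_array *tr, const char *fn)
{
	if (is_graph(tr))
		trace_graph(fn);
	else
		trace_plain(fn);
}

int main(void)
{
	struct trace_array tr = { .graph_enabled = false };

	__trace_function(&tr, "schedule");
	return 0;
}
```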
185 | 310 |
186 | 311 | static int wakeup_flag_changed(struct trace_array *tr, u32 mask, int set)
187 | 312 | {
.. | ..
219 | 344 |         unregister_wakeup_function(tr, graph);
220 | 345 | }
221 | 346 |
222 | | -#ifdef CONFIG_FUNCTION_GRAPH_TRACER
223 | | -static int wakeup_display_graph(struct trace_array *tr, int set)
224 | | -{
225 | | -        if (!(is_graph(tr) ^ set))
226 | | -                return 0;
227 | | -
228 | | -        stop_func_tracer(tr, !set);
229 | | -
230 | | -        wakeup_reset(wakeup_trace);
231 | | -        tr->max_latency = 0;
232 | | -
233 | | -        return start_func_tracer(tr, set);
234 | | -}
235 | | -
236 | | -static int wakeup_graph_entry(struct ftrace_graph_ent *trace)
237 | | -{
238 | | -        struct trace_array *tr = wakeup_trace;
239 | | -        struct trace_array_cpu *data;
240 | | -        unsigned long flags;
241 | | -        int pc, ret = 0;
242 | | -
243 | | -        if (ftrace_graph_ignore_func(trace))
244 | | -                return 0;
245 | | -        /*
246 | | -         * Do not trace a function if it's filtered by set_graph_notrace.
247 | | -         * Make the index of ret stack negative to indicate that it should
248 | | -         * ignore further functions. But it needs its own ret stack entry
249 | | -         * to recover the original index in order to continue tracing after
250 | | -         * returning from the function.
251 | | -         */
252 | | -        if (ftrace_graph_notrace_addr(trace->func))
253 | | -                return 1;
254 | | -
255 | | -        if (!func_prolog_preempt_disable(tr, &data, &pc))
256 | | -                return 0;
257 | | -
258 | | -        local_save_flags(flags);
259 | | -        ret = __trace_graph_entry(tr, trace, flags, pc);
260 | | -        atomic_dec(&data->disabled);
261 | | -        preempt_enable_notrace();
262 | | -
263 | | -        return ret;
264 | | -}
265 | | -
266 | | -static void wakeup_graph_return(struct ftrace_graph_ret *trace)
267 | | -{
268 | | -        struct trace_array *tr = wakeup_trace;
269 | | -        struct trace_array_cpu *data;
270 | | -        unsigned long flags;
271 | | -        int pc;
272 | | -
273 | | -        ftrace_graph_addr_finish(trace);
274 | | -
275 | | -        if (!func_prolog_preempt_disable(tr, &data, &pc))
276 | | -                return;
277 | | -
278 | | -        local_save_flags(flags);
279 | | -        __trace_graph_return(tr, trace, flags, pc);
280 | | -        atomic_dec(&data->disabled);
281 | | -
282 | | -        preempt_enable_notrace();
283 | | -        return;
284 | | -}
285 | | -
286 | | -static void wakeup_trace_open(struct trace_iterator *iter)
287 | | -{
288 | | -        if (is_graph(iter->tr))
289 | | -                graph_trace_open(iter);
290 | | -}
291 | | -
292 | | -static void wakeup_trace_close(struct trace_iterator *iter)
293 | | -{
294 | | -        if (iter->private)
295 | | -                graph_trace_close(iter);
296 | | -}
297 | | -
298 | | -#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_PROC | \
299 | | -                            TRACE_GRAPH_PRINT_ABS_TIME | \
300 | | -                            TRACE_GRAPH_PRINT_DURATION)
301 | | -
302 | | -static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
303 | | -{
304 | | -        /*
305 | | -         * In graph mode call the graph tracer output function,
306 | | -         * otherwise go with the TRACE_FN event handler
307 | | -         */
308 | | -        if (is_graph(iter->tr))
309 | | -                return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);
310 | | -
311 | | -        return TRACE_TYPE_UNHANDLED;
312 | | -}
313 | | -
314 | | -static void wakeup_print_header(struct seq_file *s)
315 | | -{
316 | | -        if (is_graph(wakeup_trace))
317 | | -                print_graph_headers_flags(s, GRAPH_TRACER_FLAGS);
318 | | -        else
319 | | -                trace_default_header(s);
320 | | -}
321 | | -
322 | | -static void
323 | | -__trace_function(struct trace_array *tr,
324 | | -                 unsigned long ip, unsigned long parent_ip,
325 | | -                 unsigned long flags, int pc)
326 | | -{
327 | | -        if (is_graph(tr))
328 | | -                trace_graph_function(tr, ip, parent_ip, flags, pc);
329 | | -        else
330 | | -                trace_function(tr, ip, parent_ip, flags, pc);
331 | | -}
332 | | -#else
333 | | -#define __trace_function trace_function
334 | | -
335 | | -static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
336 | | -{
337 | | -        return TRACE_TYPE_UNHANDLED;
338 | | -}
339 | | -
340 | | -static void wakeup_trace_open(struct trace_iterator *iter) { }
341 | | -static void wakeup_trace_close(struct trace_iterator *iter) { }
342 | | -
343 | | -#ifdef CONFIG_FUNCTION_TRACER
344 | | -static int wakeup_graph_entry(struct ftrace_graph_ent *trace)
345 | | -{
346 | | -        return -1;
347 | | -}
348 | | -static void wakeup_graph_return(struct ftrace_graph_ret *trace) { }
349 | | -static void wakeup_print_header(struct seq_file *s)
350 | | -{
351 | | -        trace_default_header(s);
352 | | -}
353 | | -#else
354 | | -static void wakeup_print_header(struct seq_file *s)
355 | | -{
356 | | -        trace_latency_header(s);
357 | | -}
358 | | -#endif /* CONFIG_FUNCTION_TRACER */
359 | | -#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
360 | | -
361 | 347 | /*
362 | 348 |  * Should this new latency be reported/recorded?
363 | 349 |  */
.. | ..
386 | 372 | tracing_sched_switch_trace(struct trace_array *tr,
387 | 373 |                            struct task_struct *prev,
388 | 374 |                            struct task_struct *next,
389 | | -                           unsigned long flags, int pc)
| 375 | +                           unsigned int trace_ctx)
390 | 376 | {
391 | 377 |         struct trace_event_call *call = &event_context_switch;
392 | | -        struct ring_buffer *buffer = tr->trace_buffer.buffer;
| 378 | +        struct trace_buffer *buffer = tr->array_buffer.buffer;
393 | 379 |         struct ring_buffer_event *event;
394 | 380 |         struct ctx_switch_entry *entry;
395 | 381 |
396 | 382 |         event = trace_buffer_lock_reserve(buffer, TRACE_CTX,
397 | | -                                          sizeof(*entry), flags, pc);
| 383 | +                                          sizeof(*entry), trace_ctx);
398 | 384 |         if (!event)
399 | 385 |                 return;
400 | 386 |         entry = ring_buffer_event_data(event);
.. | ..
407 | 393 |         entry->next_cpu = task_cpu(next);
408 | 394 |
409 | 395 |         if (!call_filter_check_discard(call, entry, buffer, event))
410 | | -                trace_buffer_unlock_commit(tr, buffer, event, flags, pc);
| 396 | +                trace_buffer_unlock_commit(tr, buffer, event, trace_ctx);
411 | 397 | }
412 | 398 |
413 | 399 | static void
414 | 400 | tracing_sched_wakeup_trace(struct trace_array *tr,
415 | 401 |                            struct task_struct *wakee,
416 | 402 |                            struct task_struct *curr,
417 | | -                           unsigned long flags, int pc)
| 403 | +                           unsigned int trace_ctx)
418 | 404 | {
419 | 405 |         struct trace_event_call *call = &event_wakeup;
420 | 406 |         struct ring_buffer_event *event;
421 | 407 |         struct ctx_switch_entry *entry;
422 | | -        struct ring_buffer *buffer = tr->trace_buffer.buffer;
| 408 | +        struct trace_buffer *buffer = tr->array_buffer.buffer;
423 | 409 |
424 | 410 |         event = trace_buffer_lock_reserve(buffer, TRACE_WAKE,
425 | | -                                          sizeof(*entry), flags, pc);
| 411 | +                                          sizeof(*entry), trace_ctx);
426 | 412 |         if (!event)
427 | 413 |                 return;
428 | 414 |         entry = ring_buffer_event_data(event);
.. | ..
435 | 421 |         entry->next_cpu = task_cpu(wakee);
436 | 422 |
437 | 423 |         if (!call_filter_check_discard(call, entry, buffer, event))
438 | | -                trace_buffer_unlock_commit(tr, buffer, event, flags, pc);
| 424 | +                trace_buffer_unlock_commit(tr, buffer, event, trace_ctx);
439 | 425 | }
440 | 426 |
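Both emitters above follow the same three-step ring-buffer idiom: reserve an event, fill its payload in place, then commit unless an event filter discards it. Stripped to its skeleton (this is a condensation of `tracing_sched_switch_trace()` from this diff, not new API):

```c
static void
tracing_sched_switch_trace(struct trace_array *tr, /* ..., */ unsigned int trace_ctx)
{
	struct trace_event_call *call = &event_context_switch;
	struct trace_buffer *buffer = tr->array_buffer.buffer;
	struct ring_buffer_event *event;
	struct ctx_switch_entry *entry;

	/* 1. Reserve space; fails if tracing is off or the buffer is full. */
	event = trace_buffer_lock_reserve(buffer, TRACE_CTX,
					  sizeof(*entry), trace_ctx);
	if (!event)
		return;

	/* 2. Fill the payload directly inside the ring buffer. */
	entry = ring_buffer_event_data(event);
	/* ... populate prev/next pid, prio, state, cpu ... */

	/* 3. Commit, unless a registered filter discards the event. */
	if (!call_filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit(tr, buffer, event, trace_ctx);
}
```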
441 | 427 | static void notrace
.. | ..
447 | 433 |         unsigned long flags;
448 | 434 |         long disabled;
449 | 435 |         int cpu;
450 | | -        int pc;
| 436 | +        unsigned int trace_ctx;
451 | 437 |
452 | 438 |         tracing_record_cmdline(prev);
453 | 439 |
.. | ..
466 | 452 |         if (next != wakeup_task)
467 | 453 |                 return;
468 | 454 |
469 | | -        pc = preempt_count();
470 | | -
471 | 455 |         /* disable local data, not wakeup_cpu data */
472 | 456 |         cpu = raw_smp_processor_id();
473 | | -        disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
| 457 | +        disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled);
474 | 458 |         if (likely(disabled != 1))
475 | 459 |                 goto out;
476 | 460 |
477 | 461 |         local_irq_save(flags);
| 462 | +        trace_ctx = tracing_gen_ctx_flags(flags);
| 463 | +
478 | 464 |         arch_spin_lock(&wakeup_lock);
479 | 465 |
480 | 466 |         /* We could race with grabbing wakeup_lock */
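Why `tracing_gen_ctx_flags(flags)` here rather than the plain `tracing_gen_ctx()` used elsewhere in this patch: by this point `local_irq_save()` has already disabled interrupts, so sampling the current IRQ state would mark every event as irqs-off. Passing the saved `flags` preserves the state the probe actually ran in. In sketch form (a restatement of the two lines above, with explanatory comments):

```c
unsigned long flags;
unsigned int trace_ctx;

local_irq_save(flags);	/* flags = IRQ state *before* disabling */

/* Encode the pre-disable state; tracing_gen_ctx() would instead
 * sample the current state, which is now always irqs-off. */
trace_ctx = tracing_gen_ctx_flags(flags);
```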
.. | ..
482 | 468 |                 goto out_unlock;
483 | 469 |
484 | 470 |         /* The task we are waiting for is waking up */
485 | | -        data = per_cpu_ptr(wakeup_trace->trace_buffer.data, wakeup_cpu);
| 471 | +        data = per_cpu_ptr(wakeup_trace->array_buffer.data, wakeup_cpu);
486 | 472 |
487 | | -        __trace_function(wakeup_trace, CALLER_ADDR0, CALLER_ADDR1, flags, pc);
488 | | -        tracing_sched_switch_trace(wakeup_trace, prev, next, flags, pc);
| 473 | +        __trace_function(wakeup_trace, CALLER_ADDR0, CALLER_ADDR1, trace_ctx);
| 474 | +        tracing_sched_switch_trace(wakeup_trace, prev, next, trace_ctx);
| 475 | +        __trace_stack(wakeup_trace, trace_ctx, 0);
489 | 476 |
490 | 477 |         T0 = data->preempt_timestamp;
491 | 478 |         T1 = ftrace_now(cpu);
.. | ..
496 | 483 |
497 | 484 |         if (likely(!is_tracing_stopped())) {
498 | 485 |                 wakeup_trace->max_latency = delta;
499 | | -                update_max_tr(wakeup_trace, wakeup_task, wakeup_cpu);
| 486 | +                update_max_tr(wakeup_trace, wakeup_task, wakeup_cpu, NULL);
500 | 487 |         }
501 | 488 |
502 | 489 | out_unlock:
.. | ..
504 | 491 |         arch_spin_unlock(&wakeup_lock);
505 | 492 |         local_irq_restore(flags);
506 | 493 | out:
507 | | -        atomic_dec(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
| 494 | +        atomic_dec(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled);
508 | 495 | }
509 | 496 |
510 | 497 | static void __wakeup_reset(struct trace_array *tr)
.. | ..
523 | 510 | {
524 | 511 |         unsigned long flags;
525 | 512 |
526 | | -        tracing_reset_online_cpus(&tr->trace_buffer);
| 513 | +        tracing_reset_online_cpus(&tr->array_buffer);
527 | 514 |
528 | 515 |         local_irq_save(flags);
529 | 516 |         arch_spin_lock(&wakeup_lock);
.. | ..
537 | 524 | {
538 | 525 |         struct trace_array_cpu *data;
539 | 526 |         int cpu = smp_processor_id();
540 | | -        unsigned long flags;
541 | 527 |         long disabled;
542 | | -        int pc;
| 528 | +        unsigned int trace_ctx;
543 | 529 |
544 | 530 |         if (likely(!tracer_enabled))
545 | 531 |                 return;
.. | ..
560 | 546 |             (!dl_task(p) && (p->prio >= wakeup_prio || p->prio >= current->prio)))
561 | 547 |                 return;
562 | 548 |
563 | | -        pc = preempt_count();
564 | | -        disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
| 549 | +        disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled);
565 | 550 |         if (unlikely(disabled != 1))
566 | 551 |                 goto out;
| 552 | +
| 553 | +        trace_ctx = tracing_gen_ctx();
567 | 554 |
568 | 555 |         /* interrupts should be off from try_to_wake_up */
569 | 556 |         arch_spin_lock(&wakeup_lock);
.. | ..
589 | 576 |         else
590 | 577 |                 tracing_dl = 0;
591 | 578 |
592 | | -        wakeup_task = p;
593 | | -        get_task_struct(wakeup_task);
| 579 | +        wakeup_task = get_task_struct(p);
594 | 580 |
595 | | -        local_save_flags(flags);
596 | | -
597 | | -        data = per_cpu_ptr(wakeup_trace->trace_buffer.data, wakeup_cpu);
| 581 | +        data = per_cpu_ptr(wakeup_trace->array_buffer.data, wakeup_cpu);
598 | 582 |         data->preempt_timestamp = ftrace_now(cpu);
599 | | -        tracing_sched_wakeup_trace(wakeup_trace, p, current, flags, pc);
| 583 | +        tracing_sched_wakeup_trace(wakeup_trace, p, current, trace_ctx);
| 584 | +        __trace_stack(wakeup_trace, trace_ctx, 0);
600 | 585 |
601 | 586 |         /*
602 | 587 |          * We must be careful in using CALLER_ADDR2. But since wake_up
603 | 588 |          * is not called by an assembly function (where as schedule is)
604 | 589 |          * it should be safe to use it here.
605 | 590 |          */
606 | | -        __trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc);
| 591 | +        __trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, trace_ctx);
607 | 592 |
608 | 593 | out_locked:
609 | 594 |         arch_spin_unlock(&wakeup_lock);
610 | 595 | out:
611 | | -        atomic_dec(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
| 596 | +        atomic_dec(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled);
612 | 597 | }
613 | 598 |
614 | 599 | static void start_wakeup_tracer(struct trace_array *tr)
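One more cleanup worth noting from the hunk above: `get_task_struct()` returns the task it pins, so taking the reference and publishing `wakeup_task` collapse into a single statement. A minimal illustration of that return-the-argument refcount idiom (simplified counter, not the kernel's implementation):

```c
#include <stdio.h>

struct task { int usage; const char *comm; };

/* Returning the argument lets callers grab a reference inline,
 * mirroring the kernel's get_task_struct(). */
static struct task *get_task(struct task *t)
{
	t->usage++;
	return t;
}

int main(void)
{
	struct task p = { .usage = 1, .comm = "wakeup-test" };
	struct task *wakeup_task = get_task(&p);	/* one statement */

	printf("%s usage=%d\n", wakeup_task->comm, wakeup_task->usage);
	return 0;
}
```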
---|