@@ -32,7 +32,7 @@
 	overrun_detected = false;
 	prev_overruns = 0;
 
-	tracing_reset_online_cpus(&tr->trace_buffer);
+	tracing_reset_online_cpus(&tr->array_buffer);
 }
 
 static int mmio_trace_init(struct trace_array *tr)
@@ -122,7 +122,7 @@
 static unsigned long count_overruns(struct trace_iterator *iter)
 {
 	unsigned long cnt = atomic_xchg(&dropped_count, 0);
-	unsigned long over = ring_buffer_overruns(iter->trace_buffer->buffer);
+	unsigned long over = ring_buffer_overruns(iter->array_buffer->buffer);
 
 	if (over > prev_overruns)
 		cnt += over - prev_overruns;
@@ -297,13 +297,14 @@
 				struct mmiotrace_rw *rw)
 {
 	struct trace_event_call *call = &event_mmiotrace_rw;
-	struct ring_buffer *buffer = tr->trace_buffer.buffer;
+	struct trace_buffer *buffer = tr->array_buffer.buffer;
 	struct ring_buffer_event *event;
 	struct trace_mmiotrace_rw *entry;
-	int pc = preempt_count();
+	unsigned int trace_ctx;
 
+	trace_ctx = tracing_gen_ctx_flags(0);
 	event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
-					  sizeof(*entry), 0, pc);
+					  sizeof(*entry), trace_ctx);
 	if (!event) {
 		atomic_inc(&dropped_count);
 		return;
@@ -312,13 +313,13 @@
 	entry->rw = *rw;
 
 	if (!call_filter_check_discard(call, entry, buffer, event))
-		trace_buffer_unlock_commit(tr, buffer, event, 0, pc);
+		trace_buffer_unlock_commit(tr, buffer, event, trace_ctx);
 }
 
 void mmio_trace_rw(struct mmiotrace_rw *rw)
 {
 	struct trace_array *tr = mmio_trace_array;
-	struct trace_array_cpu *data = per_cpu_ptr(tr->trace_buffer.data, smp_processor_id());
+	struct trace_array_cpu *data = per_cpu_ptr(tr->array_buffer.data, smp_processor_id());
 	__trace_mmiotrace_rw(tr, data, rw);
 }
 
@@ -327,13 +328,14 @@
 				struct mmiotrace_map *map)
 {
 	struct trace_event_call *call = &event_mmiotrace_map;
-	struct ring_buffer *buffer = tr->trace_buffer.buffer;
+	struct trace_buffer *buffer = tr->array_buffer.buffer;
 	struct ring_buffer_event *event;
 	struct trace_mmiotrace_map *entry;
-	int pc = preempt_count();
+	unsigned int trace_ctx;
 
+	trace_ctx = tracing_gen_ctx_flags(0);
 	event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
-					  sizeof(*entry), 0, pc);
+					  sizeof(*entry), trace_ctx);
 	if (!event) {
 		atomic_inc(&dropped_count);
 		return;
@@ -342,7 +344,7 @@
 	entry->map = *map;
 
 	if (!call_filter_check_discard(call, entry, buffer, event))
-		trace_buffer_unlock_commit(tr, buffer, event, 0, pc);
+		trace_buffer_unlock_commit(tr, buffer, event, trace_ctx);
 }
 
 void mmio_trace_mapping(struct mmiotrace_map *map)
@@ -351,7 +353,7 @@
 	struct trace_array_cpu *data;
 
 	preempt_disable();
-	data = per_cpu_ptr(tr->trace_buffer.data, smp_processor_id());
+	data = per_cpu_ptr(tr->array_buffer.data, smp_processor_id());
 	__trace_mmiotrace_map(tr, data, map);
 	preempt_enable();
 }
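For orientation, here is a minimal sketch of the recording pattern the hunks above converge on: the buffer now comes from tr->array_buffer and is typed struct trace_buffer, and the caller generates a single trace_ctx up front with tracing_gen_ctx_flags(0) instead of passing a separate irq-flags/preempt-count pair to reserve and commit. Only the tracing calls visible in this diff are assumed; __trace_my_event(), struct my_entry and TRACE_MY_EVENT are illustrative placeholders, and the call_filter_check_discard() step is omitted for brevity.

/* Sketch only: placeholder event type and entry layout, not part of this patch. */
static void __trace_my_event(struct trace_array *tr, struct my_entry *src)
{
	struct trace_buffer *buffer = tr->array_buffer.buffer;	/* was: struct ring_buffer *, tr->trace_buffer */
	struct ring_buffer_event *event;
	struct my_entry *entry;
	unsigned int trace_ctx;					/* was: int pc = preempt_count(); */

	trace_ctx = tracing_gen_ctx_flags(0);			/* pack the context information once */
	event = trace_buffer_lock_reserve(buffer, TRACE_MY_EVENT,
					  sizeof(*entry), trace_ctx);	/* was: sizeof(*entry), 0, pc */
	if (!event)
		return;						/* the mmiotrace code also bumps dropped_count here */

	entry = ring_buffer_event_data(event);
	*entry = *src;
	trace_buffer_unlock_commit(tr, buffer, event, trace_ctx);	/* was: event, 0, pc */
}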