```diff
@@ -32,7 +32,7 @@
 	overrun_detected = false;
 	prev_overruns = 0;
 
-	tracing_reset_online_cpus(&tr->trace_buffer);
+	tracing_reset_online_cpus(&tr->array_buffer);
 }
 
 static int mmio_trace_init(struct trace_array *tr)
```
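Every hunk in this file is mechanical fallout from one rename in the core tracing code: the buffer member of `struct trace_array` moves from `trace_buffer` to `array_buffer`, and, as the later hunks show, the name `trace_buffer` is reused for the ring buffer type itself (formerly `struct ring_buffer`). A minimal sketch of the renamed container, inferred only from the call sites in this diff; the real definition lives in the tracing headers and carries more fields:

```c
/*
 * Hedged sketch, inferred from the call sites in this diff alone;
 * the real struct in kernel/trace/trace.h has additional members.
 */
struct array_buffer {
	struct trace_buffer *buffer;		/* the ring buffer; was struct ring_buffer */
	struct trace_array_cpu __percpu *data;	/* per-CPU state, used via per_cpu_ptr() below */
};
```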
|---|
```diff
@@ -122,7 +122,7 @@
 static unsigned long count_overruns(struct trace_iterator *iter)
 {
 	unsigned long cnt = atomic_xchg(&dropped_count, 0);
-	unsigned long over = ring_buffer_overruns(iter->trace_buffer->buffer);
+	unsigned long over = ring_buffer_overruns(iter->array_buffer->buffer);
 
 	if (over > prev_overruns)
 		cnt += over - prev_overruns;
```
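The accounting above is worth a note: `ring_buffer_overruns()` returns a cumulative total for the buffer, so `count_overruns()` converts it into a per-read delta against the shared `prev_overruns` (reset in the first hunk), while `atomic_xchg(&dropped_count, 0)` folds in events mmiotrace itself failed to record and zeroes that counter in one atomic step. A standalone illustration of the cumulative-to-delta idiom, with hypothetical names:

```c
/*
 * Hypothetical illustration of the idiom count_overruns() uses:
 * turn a monotonically growing total into the amount added since
 * the previous read.
 */
static unsigned long prev_total;

static unsigned long delta_since_last_read(unsigned long total)
{
	unsigned long delta = 0;

	if (total > prev_total)
		delta = total - prev_total;
	prev_total = total;	/* remember the baseline for the next call */
	return delta;
}
```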
|---|
```diff
@@ -297,7 +297,7 @@
 				 struct mmiotrace_rw *rw)
 {
 	struct trace_event_call *call = &event_mmiotrace_rw;
-	struct ring_buffer *buffer = tr->trace_buffer.buffer;
+	struct trace_buffer *buffer = tr->array_buffer.buffer;
 	struct ring_buffer_event *event;
 	struct trace_mmiotrace_rw *entry;
 	int pc = preempt_count();
```
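The declarations above set up the usual ring-buffer event cycle: reserve space, fill the entry, commit. The body itself is elided from this hunk, and the declared `call` and `pc` hint that the real code goes through the tracing-layer helpers with event filtering, so the following is only a generic sketch of the pattern against the lower-level ring buffer API; the function name and the assumption that the entry embeds the `mmiotrace_rw` record are mine:

```c
/* Generic reserve/fill/commit sketch; NOT the elided function body. */
static void example_emit_rw(struct trace_buffer *buffer,
			    struct mmiotrace_rw *rw)
{
	struct ring_buffer_event *event;
	struct trace_mmiotrace_rw *entry;

	event = ring_buffer_lock_reserve(buffer, sizeof(*entry));
	if (!event) {
		/* assumed failure path: feed the counter count_overruns() drains */
		atomic_inc(&dropped_count);
		return;
	}
	entry = ring_buffer_event_data(event);
	entry->rw = *rw;	/* assumed layout: the entry embeds the access record */
	ring_buffer_unlock_commit(buffer, event);
}
```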
|---|
```diff
@@ -318,7 +318,7 @@
 void mmio_trace_rw(struct mmiotrace_rw *rw)
 {
 	struct trace_array *tr = mmio_trace_array;
-	struct trace_array_cpu *data = per_cpu_ptr(tr->trace_buffer.data, smp_processor_id());
+	struct trace_array_cpu *data = per_cpu_ptr(tr->array_buffer.data, smp_processor_id());
 	__trace_mmiotrace_rw(tr, data, rw);
 }
 
```
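One asymmetry worth noting: `mmio_trace_rw()` resolves the per-CPU `trace_array_cpu` with a bare `smp_processor_id()` and no explicit preemption protection, presumably because its callers on the MMIO fault path already run with preemption disabled; `mmio_trace_map()` in the final hunk has to bracket the same lookup with `preempt_disable()`/`preempt_enable()` itself.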
|---|
```diff
@@ -327,7 +327,7 @@
 				  struct mmiotrace_map *map)
 {
 	struct trace_event_call *call = &event_mmiotrace_map;
-	struct ring_buffer *buffer = tr->trace_buffer.buffer;
+	struct trace_buffer *buffer = tr->array_buffer.buffer;
 	struct ring_buffer_event *event;
 	struct trace_mmiotrace_map *entry;
 	int pc = preempt_count();
```
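`__trace_mmiotrace_map()` mirrors the rw path one for one: the same `struct ring_buffer *` to `struct trace_buffer *` type change and the same `trace_buffer.buffer` to `array_buffer.buffer` member rename, with only the event (`event_mmiotrace_map`) and entry type differing.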
|---|
```diff
@@ -351,7 +351,7 @@
 	struct trace_array_cpu *data;
 
 	preempt_disable();
-	data = per_cpu_ptr(tr->trace_buffer.data, smp_processor_id());
+	data = per_cpu_ptr(tr->array_buffer.data, smp_processor_id());
 	__trace_mmiotrace_map(tr, data, map);
 	preempt_enable();
 }
```
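Net effect visible in this file: `struct trace_buffer` now unambiguously names the ring buffer itself, and `tr->array_buffer` names the per-`trace_array` wrapper holding that buffer plus its per-CPU `data`. The old arrangement, where a member named `trace_buffer` exposed a `.buffer` of the unrelated type `struct ring_buffer`, is exactly the near-collision this rename removes.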