2023-12-11 d2ccde1c8e90d38cee87a1b0309ad2827f3fd30d
kernel/kernel/trace/trace_mmiotrace.c
@@ -32,7 +32,7 @@
 	overrun_detected = false;
 	prev_overruns = 0;
 
-	tracing_reset_online_cpus(&tr->trace_buffer);
+	tracing_reset_online_cpus(&tr->array_buffer);
 }
 
 static int mmio_trace_init(struct trace_array *tr)
@@ -122,7 +122,7 @@
 static unsigned long count_overruns(struct trace_iterator *iter)
 {
 	unsigned long cnt = atomic_xchg(&dropped_count, 0);
-	unsigned long over = ring_buffer_overruns(iter->trace_buffer->buffer);
+	unsigned long over = ring_buffer_overruns(iter->array_buffer->buffer);
 
 	if (over > prev_overruns)
 		cnt += over - prev_overruns;
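
The hunks above are part of the tree-wide rename in which struct trace_array's per-instance buffer member trace_buffer became array_buffer, and the ring buffer handle it wraps was retyped from struct ring_buffer to struct trace_buffer. Below is a minimal, self-contained sketch of the resulting access pattern; the types are deliberately simplified stand-ins, not the kernel's real definitions.

/*
 * Simplified stand-ins, not the kernel's real definitions: only the field
 * names and the tr->array_buffer.buffer access path mirror the patched code.
 */
#include <stdio.h>

struct trace_buffer {                 /* ring buffer handle (old name: struct ring_buffer) */
	unsigned long overruns;
};

struct trace_array_cpu {
	int disabled;
};

struct array_buffer {                 /* this wrapper was previously called trace_buffer */
	struct trace_buffer *buffer;
	struct trace_array_cpu *data;     /* a per-CPU pointer in the real kernel */
};

struct trace_array {
	struct array_buffer array_buffer; /* member was previously named trace_buffer */
};

static unsigned long ring_buffer_overruns(struct trace_buffer *buf)
{
	return buf->overruns;             /* stand-in for the real accessor */
}

int main(void)
{
	struct trace_buffer rb = { .overruns = 3 };
	struct trace_array_cpu cpu0 = { .disabled = 0 };
	struct trace_array tr = { .array_buffer = { .buffer = &rb, .data = &cpu0 } };

	/* was tr.trace_buffer.buffer before the rename */
	printf("overruns: %lu\n", ring_buffer_overruns(tr.array_buffer.buffer));
	return 0;
}
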
@@ -297,13 +297,14 @@
 				struct mmiotrace_rw *rw)
 {
 	struct trace_event_call *call = &event_mmiotrace_rw;
-	struct ring_buffer *buffer = tr->trace_buffer.buffer;
+	struct trace_buffer *buffer = tr->array_buffer.buffer;
 	struct ring_buffer_event *event;
 	struct trace_mmiotrace_rw *entry;
-	int pc = preempt_count();
+	unsigned int trace_ctx;
 
+	trace_ctx = tracing_gen_ctx_flags(0);
 	event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
-					  sizeof(*entry), 0, pc);
+					  sizeof(*entry), trace_ctx);
 	if (!event) {
 		atomic_inc(&dropped_count);
 		return;
@@ -312,13 +313,13 @@
 	entry->rw = *rw;
 
 	if (!call_filter_check_discard(call, entry, buffer, event))
-		trace_buffer_unlock_commit(tr, buffer, event, 0, pc);
+		trace_buffer_unlock_commit(tr, buffer, event, trace_ctx);
 }
 
 void mmio_trace_rw(struct mmiotrace_rw *rw)
 {
 	struct trace_array *tr = mmio_trace_array;
-	struct trace_array_cpu *data = per_cpu_ptr(tr->trace_buffer.data, smp_processor_id());
+	struct trace_array_cpu *data = per_cpu_ptr(tr->array_buffer.data, smp_processor_id());
 	__trace_mmiotrace_rw(tr, data, rw);
 }
 
@@ -327,13 +328,14 @@
 				struct mmiotrace_map *map)
 {
 	struct trace_event_call *call = &event_mmiotrace_map;
-	struct ring_buffer *buffer = tr->trace_buffer.buffer;
+	struct trace_buffer *buffer = tr->array_buffer.buffer;
 	struct ring_buffer_event *event;
 	struct trace_mmiotrace_map *entry;
-	int pc = preempt_count();
+	unsigned int trace_ctx;
 
+	trace_ctx = tracing_gen_ctx_flags(0);
 	event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
-					  sizeof(*entry), 0, pc);
+					  sizeof(*entry), trace_ctx);
 	if (!event) {
 		atomic_inc(&dropped_count);
 		return;
@@ -342,7 +344,7 @@
 	entry->map = *map;
 
 	if (!call_filter_check_discard(call, entry, buffer, event))
-		trace_buffer_unlock_commit(tr, buffer, event, 0, pc);
+		trace_buffer_unlock_commit(tr, buffer, event, trace_ctx);
 }
 
 void mmio_trace_mapping(struct mmiotrace_map *map)
@@ -351,7 +353,7 @@
 	struct trace_array_cpu *data;
 
 	preempt_disable();
-	data = per_cpu_ptr(tr->trace_buffer.data, smp_processor_id());
+	data = per_cpu_ptr(tr->array_buffer.data, smp_processor_id());
 	__trace_mmiotrace_map(tr, data, map);
 	preempt_enable();
 }
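
The second change in this patch switches the mmiotrace event helpers from passing a separate irq-flags/preempt-count pair to passing a single trace_ctx word produced once by tracing_gen_ctx_flags(0), which keeps the literal 0 flags the old code handed to trace_buffer_lock_reserve(). The sketch below is a self-contained illustration of packing both pieces of context into one unsigned int; the flag names and bit positions are illustrative only, not the kernel's exact layout.

#include <stdio.h>

/* Illustrative flag bits; the real kernel uses its own TRACE_FLAG_* enum. */
#define CTX_FLAG_IRQS_OFF  0x01u
#define CTX_FLAG_HARDIRQ   0x02u
#define CTX_FLAG_SOFTIRQ   0x04u

/* Pack the irq/softirq flags and the preempt depth into one word
 * (flags in the upper half, depth in the low byte). */
static unsigned int gen_trace_ctx(unsigned int irq_flags, unsigned int preempt_depth)
{
	return (irq_flags << 16) | (preempt_depth & 0xffu);
}

/* A commit helper now needs only the packed word instead of (flags, pc). */
static void event_commit(unsigned int trace_ctx)
{
	unsigned int flags = trace_ctx >> 16;
	unsigned int depth = trace_ctx & 0xffu;

	printf("flags=%#x preempt_depth=%u irqs_off=%s\n",
	       flags, depth, (flags & CTX_FLAG_IRQS_OFF) ? "yes" : "no");
}

int main(void)
{
	/* one argument replaces the old (0, preempt_count()) pair */
	event_commit(gen_trace_ctx(CTX_FLAG_IRQS_OFF, 2));
	return 0;
}
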