@@ -300,11 +300,10 @@
 	struct trace_buffer *buffer = tr->array_buffer.buffer;
 	struct ring_buffer_event *event;
 	struct trace_mmiotrace_rw *entry;
-	unsigned int trace_ctx;
+	int pc = preempt_count();
 
-	trace_ctx = tracing_gen_ctx_flags(0);
 	event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
-					  sizeof(*entry), trace_ctx);
+					  sizeof(*entry), 0, pc);
 	if (!event) {
 		atomic_inc(&dropped_count);
 		return;
@@ -313,7 +312,7 @@
 	entry->rw = *rw;
 
 	if (!call_filter_check_discard(call, entry, buffer, event))
-		trace_buffer_unlock_commit(tr, buffer, event, trace_ctx);
+		trace_buffer_unlock_commit(tr, buffer, event, 0, pc);
 }
 
 void mmio_trace_rw(struct mmiotrace_rw *rw)
@@ -331,11 +330,10 @@
 	struct trace_buffer *buffer = tr->array_buffer.buffer;
 	struct ring_buffer_event *event;
 	struct trace_mmiotrace_map *entry;
-	unsigned int trace_ctx;
+	int pc = preempt_count();
 
-	trace_ctx = tracing_gen_ctx_flags(0);
 	event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
-					  sizeof(*entry), trace_ctx);
+					  sizeof(*entry), 0, pc);
 	if (!event) {
 		atomic_inc(&dropped_count);
 		return;
@@ -344,7 +342,7 @@
 	entry->map = *map;
 
 	if (!call_filter_check_discard(call, entry, buffer, event))
-		trace_buffer_unlock_commit(tr, buffer, event, trace_ctx);
+		trace_buffer_unlock_commit(tr, buffer, event, 0, pc);
 }
 
 void mmio_trace_mapping(struct mmiotrace_map *map)