2024-12-19 9370bb92b2d16684ee45cf24e879c93c509162da
--- a/kernel/kernel/trace/trace_events.c
+++ b/kernel/kernel/trace/trace_events.c
@@ -184,8 +184,6 @@
 	__common_field(unsigned char, flags);
 	__common_field(unsigned char, preempt_count);
 	__common_field(int, pid);
-	__common_field(unsigned char, migrate_disable);
-	__common_field(unsigned char, preempt_lazy_count);
 
 	return ret;
 }
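
Note: the two common fields removed here are PREEMPT_RT patchset additions; what remains matches the mainline common event header. A minimal sketch of the layout this hunk restores, assuming the upstream struct trace_entry definition (the type field sits just above this hunk's context):

/*
 * Userspace sketch of the common event header described by the
 * __common_field() calls above; mirrors mainline struct trace_entry.
 */
#include <stdio.h>

struct trace_entry {
	unsigned short	type;
	unsigned char	flags;
	unsigned char	preempt_count;
	int		pid;
	/* Removed by this hunk (RT-only additions):
	 *	unsigned char migrate_disable;
	 *	unsigned char preempt_lazy_count;
	 */
};

int main(void)
{
	printf("common header is %zu bytes\n", sizeof(struct trace_entry));
	return 0;
}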
@@ -261,19 +259,22 @@
 	    trace_event_ignore_this_pid(trace_file))
 		return NULL;
 
+	local_save_flags(fbuffer->flags);
+	fbuffer->pc = preempt_count();
 	/*
 	 * If CONFIG_PREEMPTION is enabled, then the tracepoint itself disables
 	 * preemption (adding one to the preempt_count). Since we are
 	 * interested in the preempt_count at the time the tracepoint was
 	 * hit, we need to subtract one to offset the increment.
 	 */
-	fbuffer->trace_ctx = tracing_gen_ctx_dec();
+	if (IS_ENABLED(CONFIG_PREEMPTION))
+		fbuffer->pc--;
 	fbuffer->trace_file = trace_file;
 
 	fbuffer->event =
 		trace_event_buffer_lock_reserve(&fbuffer->buffer, trace_file,
 						event_call->event.type, len,
-						fbuffer->trace_ctx);
+						fbuffer->flags, fbuffer->pc);
 	if (!fbuffer->event)
 		return NULL;
 
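
Note: this hunk swaps the packed trace_ctx value for the older split pair of saved irq flags plus preempt count, captured before the ring-buffer event is reserved. The decrement the comment describes can be modelled in a few lines; a hedged userspace sketch of just that adjustment (hypothetical wrapper, not kernel code):

#include <stdio.h>

#define CONFIG_PREEMPTION 1	/* assumed: preemptible configuration */

static int preempt_count;	/* stand-in for the per-CPU counter */

/* Report the preempt count as it was when the event fired. */
static int tracepoint_pc(void)
{
	int pc = preempt_count;

	/* The tracepoint wrapper itself added one; undo it. */
	if (CONFIG_PREEMPTION)
		pc--;
	return pc;
}

int main(void)
{
	preempt_count++;	/* the tracepoint disables preemption */
	printf("reported pc = %d\n", tracepoint_pc());	/* prints 0 */
	preempt_count--;
	return 0;
}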
@@ -370,7 +371,6 @@
 {
 	struct trace_event_call *call = file->event_call;
 	struct trace_array *tr = file->tr;
-	unsigned long file_flags = file->flags;
 	int ret = 0;
 	int disable;
 
@@ -394,6 +394,8 @@
 				break;
 			disable = file->flags & EVENT_FILE_FL_SOFT_DISABLED;
 			clear_bit(EVENT_FILE_FL_SOFT_MODE_BIT, &file->flags);
+			/* Disable use of trace_buffered_event */
+			trace_buffered_event_disable();
 		} else
 			disable = !(file->flags & EVENT_FILE_FL_SOFT_MODE);
 
@@ -432,6 +434,8 @@
 			if (atomic_inc_return(&file->sm_ref) > 1)
 				break;
 			set_bit(EVENT_FILE_FL_SOFT_MODE_BIT, &file->flags);
+			/* Enable use of trace_buffered_event */
+			trace_buffered_event_enable();
 		}
 
 		if (!(file->flags & EVENT_FILE_FL_ENABLED)) {
@@ -469,15 +473,6 @@
 			set_bit(EVENT_FILE_FL_WAS_ENABLED_BIT, &file->flags);
 		}
 		break;
-	}
-
-	/* Enable or disable use of trace_buffered_event */
-	if ((file_flags & EVENT_FILE_FL_SOFT_DISABLED) !=
-	    (file->flags & EVENT_FILE_FL_SOFT_DISABLED)) {
-		if (file->flags & EVENT_FILE_FL_SOFT_DISABLED)
-			trace_buffered_event_enable();
-		else
-			trace_buffered_event_disable();
 	}
 
 	return ret;
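
Note: together with the two hunks above, this moves trace_buffered_event_enable()/disable() to the exact SOFT_MODE bit transitions, where the sm_ref counter allows one call at the first-on and last-off points, and deletes the old scheme that diffed a file_flags snapshot against the current flags after the switch. A toy model of the balanced pairing this yields (illustrative names only, not the kernel implementation):

#include <stdio.h>

static int sm_ref;		/* models file->sm_ref                 */
static int buffered_event_ref;	/* models the buffered-event user count */

static void soft_enable(void)
{
	if (++sm_ref > 1)
		return;			/* already in soft mode  */
	buffered_event_ref++;		/* first user: enable    */
}

static void soft_disable(void)
{
	if (--sm_ref > 0)
		return;			/* still has users       */
	buffered_event_ref--;		/* last user: disable    */
}

int main(void)
{
	soft_enable();
	soft_enable();			/* nested: no second enable */
	soft_disable();
	soft_disable();
	printf("buffered_event_ref = %d\n", buffered_event_ref); /* 0 */
	return 0;
}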
@@ -1860,9 +1855,10 @@
 };
 
 static const struct file_operations ftrace_enable_fops = {
-	.open = tracing_open_generic,
+	.open = tracing_open_file_tr,
 	.read = event_enable_read,
 	.write = event_enable_write,
+	.release = tracing_release_file_tr,
 	.llseek = default_llseek,
 };
 
@@ -1879,9 +1875,10 @@
 };
 
 static const struct file_operations ftrace_event_filter_fops = {
-	.open = tracing_open_generic,
+	.open = tracing_open_file_tr,
 	.read = event_filter_read,
 	.write = event_filter_write,
+	.release = tracing_release_file_tr,
 	.llseek = default_llseek,
 };
 
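
Note: both file_operations switch from tracing_open_generic to the tracing_open_file_tr/tracing_release_file_tr pair, so a reference on the backing trace data is taken at open and dropped only at release, rather than leaving the open fd with no lifetime guarantee. A generic sketch of that open/release refcount pattern (hypothetical names, not the kernel API):

#include <stdio.h>

struct backing { int refcount; };

static void obj_get(struct backing *b)
{
	b->refcount++;
}

static void obj_put(struct backing *b)
{
	if (--b->refcount == 0)
		printf("backing object freed\n");
}

/* ->open takes a reference; ->release drops it. */
static void file_open(struct backing *b)    { obj_get(b); }
static void file_release(struct backing *b) { obj_put(b); }

int main(void)
{
	struct backing b = { .refcount = 1 };

	file_open(&b);		/* fd now pins the object        */
	obj_put(&b);		/* owner drops its own reference */
	file_release(&b);	/* last reference: freed here    */
	return 0;
}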
@@ -2660,7 +2657,7 @@
 	 * over from this module may be passed to the new module events and
 	 * unexpected results may occur.
 	 */
-	tracing_reset_all_online_cpus();
+	tracing_reset_all_online_cpus_unlocked();
 }
 
 static int trace_module_notify(struct notifier_block *self,
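
Note: the _unlocked suffix indicates the caller on this module-teardown path is expected to already hold the tracing locks, so taking them again inside the reset would self-deadlock. A sketch of the usual locked/unlocked split, with pthreads standing in for the kernel mutex (assumed structure, not the actual implementation):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void reset_all_unlocked(void)
{
	/* Caller must hold `lock`. */
	printf("resetting per-CPU buffers\n");
}

static void reset_all(void)
{
	pthread_mutex_lock(&lock);
	reset_all_unlocked();
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	reset_all();			/* standalone caller        */

	pthread_mutex_lock(&lock);	/* already-locked caller,   */
	reset_all_unlocked();		/* as in the hunk above     */
	pthread_mutex_unlock(&lock);
	return 0;
}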
@@ -3698,11 +3695,12 @@
 	struct trace_buffer *buffer;
 	struct ring_buffer_event *event;
 	struct ftrace_entry *entry;
-	unsigned int trace_ctx;
+	unsigned long flags;
 	long disabled;
 	int cpu;
+	int pc;
 
-	trace_ctx = tracing_gen_ctx();
+	pc = preempt_count();
 	preempt_disable_notrace();
 	cpu = raw_smp_processor_id();
 	disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));
@@ -3710,9 +3708,11 @@
 	if (disabled != 1)
 		goto out;
 
+	local_save_flags(flags);
+
 	event = trace_event_buffer_lock_reserve(&buffer, &event_trace_file,
 						TRACE_FN, sizeof(*entry),
-						trace_ctx);
+						flags, pc);
 	if (!event)
 		goto out;
 	entry = ring_buffer_event_data(event);
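
Note: the probe uses a per-CPU counter as a recursion guard: only the outermost entry on a CPU reserves and commits an event, while any nested hit sees disabled != 1 and bails out before touching the buffer. A single-CPU toy model of that guard:

#include <stdio.h>

static int test_event_disable;	/* per-CPU atomic in the real code */

static void probe(int depth)
{
	long disabled = ++test_event_disable;

	if (disabled != 1)
		goto out;	/* nested entry: skip the body */

	printf("recording event at depth %d\n", depth);
	if (depth == 0)
		probe(1);	/* re-entry is silently ignored */
out:
	test_event_disable--;
}

int main(void)
{
	probe(0);	/* prints exactly one record line */
	return 0;
}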
@@ -3720,7 +3720,7 @@
 	entry->parent_ip = parent_ip;
 
 	event_trigger_unlock_commit(&event_trace_file, buffer, event,
-				    entry, trace_ctx);
+				    entry, flags, pc);
 out:
 	atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
 	preempt_enable_notrace();