From 6778948f9de86c3cfaf36725a7c87dcff9ba247f Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Mon, 11 Dec 2023 08:20:59 +0000
Subject: [PATCH] kernel_5.10: drop the RT tracing changes from trace_events.c

Drop the PREEMPT_RT-specific tracing changes from trace_events.c: remove
the migrate_disable and preempt_lazy_count common fields and go back to
passing the saved interrupt flags and preemption count (flags, pc) to
trace_event_buffer_lock_reserve() and event_trigger_unlock_commit()
instead of the packed trace_ctx value.
---
 kernel/kernel/trace/trace_events.c |   20 ++++++++++++--------
 1 file changed, 12 insertions(+), 8 deletions(-)

diff --git a/kernel/kernel/trace/trace_events.c b/kernel/kernel/trace/trace_events.c
index ca64598..bac13f2 100644
--- a/kernel/kernel/trace/trace_events.c
+++ b/kernel/kernel/trace/trace_events.c
@@ -184,8 +184,6 @@
 	__common_field(unsigned char, flags);
 	__common_field(unsigned char, preempt_count);
 	__common_field(int, pid);
-	__common_field(unsigned char, migrate_disable);
-	__common_field(unsigned char, preempt_lazy_count);
 
 	return ret;
 }
@@ -261,19 +259,22 @@
 	    trace_event_ignore_this_pid(trace_file))
 		return NULL;
 
+	local_save_flags(fbuffer->flags);
+	fbuffer->pc = preempt_count();
 	/*
 	 * If CONFIG_PREEMPTION is enabled, then the tracepoint itself disables
 	 * preemption (adding one to the preempt_count). Since we are
 	 * interested in the preempt_count at the time the tracepoint was
 	 * hit, we need to subtract one to offset the increment.
 	 */
-	fbuffer->trace_ctx = tracing_gen_ctx_dec();
+	if (IS_ENABLED(CONFIG_PREEMPTION))
+		fbuffer->pc--;
 	fbuffer->trace_file = trace_file;
 
 	fbuffer->event =
 		trace_event_buffer_lock_reserve(&fbuffer->buffer, trace_file,
 						event_call->event.type, len,
-						fbuffer->trace_ctx);
+						fbuffer->flags, fbuffer->pc);
 	if (!fbuffer->event)
 		return NULL;
 
@@ -3698,11 +3699,12 @@
 	struct trace_buffer *buffer;
 	struct ring_buffer_event *event;
 	struct ftrace_entry *entry;
-	unsigned int trace_ctx;
+	unsigned long flags;
 	long disabled;
 	int cpu;
+	int pc;
 
-	trace_ctx = tracing_gen_ctx();
+	pc = preempt_count();
 	preempt_disable_notrace();
 	cpu = raw_smp_processor_id();
 	disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));
@@ -3710,9 +3712,11 @@
 	if (disabled != 1)
 		goto out;
 
+	local_save_flags(flags);
+
 	event = trace_event_buffer_lock_reserve(&buffer, &event_trace_file,
 						TRACE_FN, sizeof(*entry),
-						trace_ctx);
+						flags, pc);
 	if (!event)
 		goto out;
 	entry	= ring_buffer_event_data(event);
@@ -3720,7 +3724,7 @@
 	entry->parent_ip		= parent_ip;
 
 	event_trigger_unlock_commit(&event_trace_file, buffer, event,
-				    entry, trace_ctx);
+				    entry, flags, pc);
  out:
 	atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
 	preempt_enable_notrace();
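
Note (not part of the patch): the comment kept in the trace_event_buffer_reserve()
hunk explains why the saved preemption count is decremented when CONFIG_PREEMPTION
is enabled: the tracepoint wrapper itself disables preemption before the probe
runs, so the count observed inside the probe is one higher than at the call
site. Below is a minimal standalone sketch of that bookkeeping in plain
userspace C, not kernel code; every name in it (fake_preempt_count, probe,
tracepoint) is an illustrative stand-in.

/*
 * Sketch of the preempt-count offset described in the comment above:
 * the wrapper adds one to the count, so the probe subtracts one to
 * record the count as it was at the call site.
 */
#include <stdio.h>

#define CONFIG_PREEMPTION 1            /* assumption: preemptible config */

static int fake_preempt_count;         /* stand-in for preempt_count() */

static void probe(void)
{
	int pc = fake_preempt_count;   /* count seen inside the probe: 1 */

	/* Undo the +1 added by the wrapper, as the patched code does. */
	if (CONFIG_PREEMPTION)
		pc--;

	printf("recorded pc = %d\n", pc);  /* prints 0, the caller's count */
}

static void tracepoint(void)
{
	fake_preempt_count++;          /* wrapper "disables preemption" */
	probe();
	fake_preempt_count--;          /* wrapper "re-enables preemption" */
}

int main(void)
{
	tracepoint();
	return 0;
}

The restored kernel code performs the same adjustment on fbuffer->pc guarded
by IS_ENABLED(CONFIG_PREEMPTION); the removed tracing_gen_ctx_dec() call
applied the equivalent decrement to the packed trace_ctx value.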

--
Gitblit v1.6.2