From 6778948f9de86c3cfaf36725a7c87dcff9ba247f Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Mon, 11 Dec 2023 08:20:59 +0000
Subject: [PATCH] trace_irqsoff: pass irqflags and preempt count instead of trace_ctx (kernel_5.10, non-RT)

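Switch kernel/trace/trace_irqsoff.c on the non-RT 5.10 tree back to the
older tracing calling convention: instead of building a merged trace_ctx
value with tracing_gen_ctx()/tracing_gen_ctx_flags(), pass the saved
irqflags and the preempt count as separate arguments to trace_function(),
__trace_graph_entry(), __trace_graph_return(), __trace_function() and
__trace_stack().

start_critical_timing() and stop_critical_timing() now take the preempt
count from their callers and obtain the flags with local_save_flags(), so
the tracer entry points (start/stop_critical_timings(),
tracer_hardirqs_on/off(), tracer_preempt_on/off()) read preempt_count()
once and hand it down.
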
---
 kernel/kernel/trace/trace_irqsoff.c |   86 +++++++++++++++++++++++++------------------
 1 file changed, 50 insertions(+), 36 deletions(-)

diff --git a/kernel/kernel/trace/trace_irqsoff.c b/kernel/kernel/trace/trace_irqsoff.c
index f11add8..ee4571b 100644
--- a/kernel/kernel/trace/trace_irqsoff.c
+++ b/kernel/kernel/trace/trace_irqsoff.c
@@ -143,14 +143,11 @@
 	struct trace_array *tr = irqsoff_trace;
 	struct trace_array_cpu *data;
 	unsigned long flags;
-	unsigned int trace_ctx;
 
 	if (!func_prolog_dec(tr, &data, &flags))
 		return;
 
-	trace_ctx = tracing_gen_ctx_flags(flags);
-
-	trace_function(tr, ip, parent_ip, trace_ctx);
+	trace_function(tr, ip, parent_ip, flags, preempt_count());
 
 	atomic_dec(&data->disabled);
 }
@@ -180,8 +177,8 @@
 	struct trace_array *tr = irqsoff_trace;
 	struct trace_array_cpu *data;
 	unsigned long flags;
-	unsigned int trace_ctx;
 	int ret;
+	int pc;
 
 	if (ftrace_graph_ignore_func(trace))
 		return 0;
@@ -198,8 +195,8 @@
 	if (!func_prolog_dec(tr, &data, &flags))
 		return 0;
 
-	trace_ctx = tracing_gen_ctx_flags(flags);
-	ret = __trace_graph_entry(tr, trace, trace_ctx);
+	pc = preempt_count();
+	ret = __trace_graph_entry(tr, trace, flags, pc);
 	atomic_dec(&data->disabled);
 
 	return ret;
@@ -210,15 +207,15 @@
 	struct trace_array *tr = irqsoff_trace;
 	struct trace_array_cpu *data;
 	unsigned long flags;
-	unsigned int trace_ctx;
+	int pc;
 
 	ftrace_graph_addr_finish(trace);
 
 	if (!func_prolog_dec(tr, &data, &flags))
 		return;
 
-	trace_ctx = tracing_gen_ctx_flags(flags);
-	__trace_graph_return(tr, trace, trace_ctx);
+	pc = preempt_count();
+	__trace_graph_return(tr, trace, flags, pc);
 	atomic_dec(&data->disabled);
 }
 
@@ -270,12 +267,12 @@
 static void
 __trace_function(struct trace_array *tr,
 		 unsigned long ip, unsigned long parent_ip,
-		 unsigned int trace_ctx)
+		 unsigned long flags, int pc)
 {
 	if (is_graph(tr))
-		trace_graph_function(tr, ip, parent_ip, trace_ctx);
+		trace_graph_function(tr, ip, parent_ip, flags, pc);
 	else
-		trace_function(tr, ip, parent_ip, trace_ctx);
+		trace_function(tr, ip, parent_ip, flags, pc);
 }
 
 #else
@@ -325,13 +322,15 @@
 {
 	u64 T0, T1, delta;
 	unsigned long flags;
-	unsigned int trace_ctx;
+	int pc;
 
 	T0 = data->preempt_timestamp;
 	T1 = ftrace_now(cpu);
 	delta = T1-T0;
 
-	trace_ctx = tracing_gen_ctx();
+	local_save_flags(flags);
+
+	pc = preempt_count();
 
 	if (!report_latency(tr, delta))
 		goto out;
@@ -342,9 +341,9 @@
 	if (!report_latency(tr, delta))
 		goto out_unlock;
 
-	__trace_function(tr, CALLER_ADDR0, parent_ip, trace_ctx);
+	__trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
 	/* Skip 5 functions to get to the irq/preempt enable function */
-	__trace_stack(tr, trace_ctx, 5);
+	__trace_stack(tr, flags, 5, pc);
 
 	if (data->critical_sequence != max_sequence)
 		goto out_unlock;
@@ -364,15 +363,16 @@
 out:
 	data->critical_sequence = max_sequence;
 	data->preempt_timestamp = ftrace_now(cpu);
-	__trace_function(tr, CALLER_ADDR0, parent_ip, trace_ctx);
+	__trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
 }
 
 static nokprobe_inline void
-start_critical_timing(unsigned long ip, unsigned long parent_ip)
+start_critical_timing(unsigned long ip, unsigned long parent_ip, int pc)
 {
 	int cpu;
 	struct trace_array *tr = irqsoff_trace;
 	struct trace_array_cpu *data;
+	unsigned long flags;
 
 	if (!tracer_enabled || !tracing_is_enabled())
 		return;
@@ -393,7 +393,9 @@
 	data->preempt_timestamp = ftrace_now(cpu);
 	data->critical_start = parent_ip ? : ip;
 
-	__trace_function(tr, ip, parent_ip, tracing_gen_ctx());
+	local_save_flags(flags);
+
+	__trace_function(tr, ip, parent_ip, flags, pc);
 
 	per_cpu(tracing_cpu, cpu) = 1;
 
@@ -401,12 +403,12 @@
 }
 
 static nokprobe_inline void
-stop_critical_timing(unsigned long ip, unsigned long parent_ip)
+stop_critical_timing(unsigned long ip, unsigned long parent_ip, int pc)
 {
 	int cpu;
 	struct trace_array *tr = irqsoff_trace;
 	struct trace_array_cpu *data;
-	unsigned int trace_ctx;
+	unsigned long flags;
 
 	cpu = raw_smp_processor_id();
 	/* Always clear the tracing cpu on stopping the trace */
@@ -426,8 +428,8 @@
 
 	atomic_inc(&data->disabled);
 
-	trace_ctx = tracing_gen_ctx();
-	__trace_function(tr, ip, parent_ip, trace_ctx);
+	local_save_flags(flags);
+	__trace_function(tr, ip, parent_ip, flags, pc);
 	check_critical_timing(tr, data, parent_ip ? : ip, cpu);
 	data->critical_start = 0;
 	atomic_dec(&data->disabled);
@@ -436,16 +438,20 @@
 /* start and stop critical timings used to for stoppage (in idle) */
 void start_critical_timings(void)
 {
-	if (preempt_trace(preempt_count()) || irq_trace())
-		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
+	int pc = preempt_count();
+
+	if (preempt_trace(pc) || irq_trace())
+		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1, pc);
 }
 EXPORT_SYMBOL_GPL(start_critical_timings);
 NOKPROBE_SYMBOL(start_critical_timings);
 
 void stop_critical_timings(void)
 {
-	if (preempt_trace(preempt_count()) || irq_trace())
-		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
+	int pc = preempt_count();
+
+	if (preempt_trace(pc) || irq_trace())
+		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1, pc);
 }
 EXPORT_SYMBOL_GPL(stop_critical_timings);
 NOKPROBE_SYMBOL(stop_critical_timings);
@@ -607,15 +613,19 @@
  */
 void tracer_hardirqs_on(unsigned long a0, unsigned long a1)
 {
-	if (!preempt_trace(preempt_count()) && irq_trace())
-		stop_critical_timing(a0, a1);
+	unsigned int pc = preempt_count();
+
+	if (!preempt_trace(pc) && irq_trace())
+		stop_critical_timing(a0, a1, pc);
 }
 NOKPROBE_SYMBOL(tracer_hardirqs_on);
 
 void tracer_hardirqs_off(unsigned long a0, unsigned long a1)
 {
-	if (!preempt_trace(preempt_count()) && irq_trace())
-		start_critical_timing(a0, a1);
+	unsigned int pc = preempt_count();
+
+	if (!preempt_trace(pc) && irq_trace())
+		start_critical_timing(a0, a1, pc);
 }
 NOKPROBE_SYMBOL(tracer_hardirqs_off);
 
@@ -655,14 +665,18 @@
 #ifdef CONFIG_PREEMPT_TRACER
 void tracer_preempt_on(unsigned long a0, unsigned long a1)
 {
-	if (preempt_trace(preempt_count()) && !irq_trace())
-		stop_critical_timing(a0, a1);
+	int pc = preempt_count();
+
+	if (preempt_trace(pc) && !irq_trace())
+		stop_critical_timing(a0, a1, pc);
 }
 
 void tracer_preempt_off(unsigned long a0, unsigned long a1)
 {
-	if (preempt_trace(preempt_count()) && !irq_trace())
-		start_critical_timing(a0, a1);
+	int pc = preempt_count();
+
+	if (preempt_trace(pc) && !irq_trace())
+		start_critical_timing(a0, a1, pc);
 }
 
 static int preemptoff_tracer_init(struct trace_array *tr)

--
Gitblit v1.6.2