From 9999e48639b3cecb08ffb37358bcba3b48161b29 Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Fri, 10 May 2024 08:50:17 +0000
Subject: [PATCH] trace_irqsoff: pass flags and preempt count instead of trace_ctx
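
This converts kernel/trace/trace_irqsoff.c away from the packed
trace_ctx word produced by tracing_gen_ctx()/tracing_gen_ctx_flags()
and back to passing the saved irq flags and preempt_count() separately
to trace_function(), __trace_graph_entry(), __trace_graph_return(),
__trace_function(), __trace_stack(), start_critical_timing() and
stop_critical_timing().

The sketch below is illustrative only and is not part of the diff; it
assumes kernel/trace/trace.h is in scope, example_irqsoff_call() is a
hypothetical caller, and shows how the two calling conventions map
onto each other:

static inline void example_irqsoff_call(struct trace_array *tr,
					unsigned long ip,
					unsigned long parent_ip)
{
	unsigned long flags;

	local_save_flags(flags);

	/* v5.11+ convention: irq state and preempt count packed into one word */
	/* trace_function(tr, ip, parent_ip, tracing_gen_ctx_flags(flags)); */

	/* convention restored by this patch: flags and preempt count passed separately */
	trace_function(tr, ip, parent_ip, flags, preempt_count());
}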
---
kernel/kernel/trace/trace_irqsoff.c | 89 ++++++++++++++++++++++++++------------------
1 file changed, 52 insertions(+), 37 deletions(-)
diff --git a/kernel/kernel/trace/trace_irqsoff.c b/kernel/kernel/trace/trace_irqsoff.c
index f11add8..619a609 100644
--- a/kernel/kernel/trace/trace_irqsoff.c
+++ b/kernel/kernel/trace/trace_irqsoff.c
@@ -143,14 +143,11 @@
struct trace_array *tr = irqsoff_trace;
struct trace_array_cpu *data;
unsigned long flags;
- unsigned int trace_ctx;
if (!func_prolog_dec(tr, &data, &flags))
return;
- trace_ctx = tracing_gen_ctx_flags(flags);
-
- trace_function(tr, ip, parent_ip, trace_ctx);
+ trace_function(tr, ip, parent_ip, flags, preempt_count());
atomic_dec(&data->disabled);
}
@@ -180,8 +177,8 @@
struct trace_array *tr = irqsoff_trace;
struct trace_array_cpu *data;
unsigned long flags;
- unsigned int trace_ctx;
int ret;
+ int pc;
if (ftrace_graph_ignore_func(trace))
return 0;
@@ -198,8 +195,8 @@
if (!func_prolog_dec(tr, &data, &flags))
return 0;
- trace_ctx = tracing_gen_ctx_flags(flags);
- ret = __trace_graph_entry(tr, trace, trace_ctx);
+ pc = preempt_count();
+ ret = __trace_graph_entry(tr, trace, flags, pc);
atomic_dec(&data->disabled);
return ret;
@@ -210,15 +207,15 @@
struct trace_array *tr = irqsoff_trace;
struct trace_array_cpu *data;
unsigned long flags;
- unsigned int trace_ctx;
+ int pc;
ftrace_graph_addr_finish(trace);
if (!func_prolog_dec(tr, &data, &flags))
return;
- trace_ctx = tracing_gen_ctx_flags(flags);
- __trace_graph_return(tr, trace, trace_ctx);
+ pc = preempt_count();
+ __trace_graph_return(tr, trace, flags, pc);
atomic_dec(&data->disabled);
}
@@ -231,7 +228,8 @@
{
if (is_graph(iter->tr))
graph_trace_open(iter);
-
+ else
+ iter->private = NULL;
}
static void irqsoff_trace_close(struct trace_iterator *iter)
@@ -270,12 +268,12 @@
static void
__trace_function(struct trace_array *tr,
unsigned long ip, unsigned long parent_ip,
- unsigned int trace_ctx)
+ unsigned long flags, int pc)
{
if (is_graph(tr))
- trace_graph_function(tr, ip, parent_ip, trace_ctx);
+ trace_graph_function(tr, ip, parent_ip, flags, pc);
else
- trace_function(tr, ip, parent_ip, trace_ctx);
+ trace_function(tr, ip, parent_ip, flags, pc);
}
#else
@@ -325,13 +323,15 @@
{
u64 T0, T1, delta;
unsigned long flags;
- unsigned int trace_ctx;
+ int pc;
T0 = data->preempt_timestamp;
T1 = ftrace_now(cpu);
delta = T1-T0;
- trace_ctx = tracing_gen_ctx();
+ local_save_flags(flags);
+
+ pc = preempt_count();
if (!report_latency(tr, delta))
goto out;
@@ -342,9 +342,9 @@
if (!report_latency(tr, delta))
goto out_unlock;
- __trace_function(tr, CALLER_ADDR0, parent_ip, trace_ctx);
+ __trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
/* Skip 5 functions to get to the irq/preempt enable function */
- __trace_stack(tr, trace_ctx, 5);
+ __trace_stack(tr, flags, 5, pc);
if (data->critical_sequence != max_sequence)
goto out_unlock;
@@ -364,15 +364,16 @@
out:
data->critical_sequence = max_sequence;
data->preempt_timestamp = ftrace_now(cpu);
- __trace_function(tr, CALLER_ADDR0, parent_ip, trace_ctx);
+ __trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
}
static nokprobe_inline void
-start_critical_timing(unsigned long ip, unsigned long parent_ip)
+start_critical_timing(unsigned long ip, unsigned long parent_ip, int pc)
{
int cpu;
struct trace_array *tr = irqsoff_trace;
struct trace_array_cpu *data;
+ unsigned long flags;
if (!tracer_enabled || !tracing_is_enabled())
return;
@@ -393,7 +394,9 @@
data->preempt_timestamp = ftrace_now(cpu);
data->critical_start = parent_ip ? : ip;
- __trace_function(tr, ip, parent_ip, tracing_gen_ctx());
+ local_save_flags(flags);
+
+ __trace_function(tr, ip, parent_ip, flags, pc);
per_cpu(tracing_cpu, cpu) = 1;
@@ -401,12 +404,12 @@
}
static nokprobe_inline void
-stop_critical_timing(unsigned long ip, unsigned long parent_ip)
+stop_critical_timing(unsigned long ip, unsigned long parent_ip, int pc)
{
int cpu;
struct trace_array *tr = irqsoff_trace;
struct trace_array_cpu *data;
- unsigned int trace_ctx;
+ unsigned long flags;
cpu = raw_smp_processor_id();
/* Always clear the tracing cpu on stopping the trace */
@@ -426,8 +429,8 @@
atomic_inc(&data->disabled);
- trace_ctx = tracing_gen_ctx();
- __trace_function(tr, ip, parent_ip, trace_ctx);
+ local_save_flags(flags);
+ __trace_function(tr, ip, parent_ip, flags, pc);
check_critical_timing(tr, data, parent_ip ? : ip, cpu);
data->critical_start = 0;
atomic_dec(&data->disabled);
@@ -436,16 +439,20 @@
/* start and stop critical timings used to for stoppage (in idle) */
void start_critical_timings(void)
{
- if (preempt_trace(preempt_count()) || irq_trace())
- start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
+ int pc = preempt_count();
+
+ if (preempt_trace(pc) || irq_trace())
+ start_critical_timing(CALLER_ADDR0, CALLER_ADDR1, pc);
}
EXPORT_SYMBOL_GPL(start_critical_timings);
NOKPROBE_SYMBOL(start_critical_timings);
void stop_critical_timings(void)
{
- if (preempt_trace(preempt_count()) || irq_trace())
- stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
+ int pc = preempt_count();
+
+ if (preempt_trace(pc) || irq_trace())
+ stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1, pc);
}
EXPORT_SYMBOL_GPL(stop_critical_timings);
NOKPROBE_SYMBOL(stop_critical_timings);
@@ -607,15 +614,19 @@
*/
void tracer_hardirqs_on(unsigned long a0, unsigned long a1)
{
- if (!preempt_trace(preempt_count()) && irq_trace())
- stop_critical_timing(a0, a1);
+ unsigned int pc = preempt_count();
+
+ if (!preempt_trace(pc) && irq_trace())
+ stop_critical_timing(a0, a1, pc);
}
NOKPROBE_SYMBOL(tracer_hardirqs_on);
void tracer_hardirqs_off(unsigned long a0, unsigned long a1)
{
- if (!preempt_trace(preempt_count()) && irq_trace())
- start_critical_timing(a0, a1);
+ unsigned int pc = preempt_count();
+
+ if (!preempt_trace(pc) && irq_trace())
+ start_critical_timing(a0, a1, pc);
}
NOKPROBE_SYMBOL(tracer_hardirqs_off);
@@ -655,14 +666,18 @@
#ifdef CONFIG_PREEMPT_TRACER
void tracer_preempt_on(unsigned long a0, unsigned long a1)
{
- if (preempt_trace(preempt_count()) && !irq_trace())
- stop_critical_timing(a0, a1);
+ int pc = preempt_count();
+
+ if (preempt_trace(pc) && !irq_trace())
+ stop_critical_timing(a0, a1, pc);
}
void tracer_preempt_off(unsigned long a0, unsigned long a1)
{
- if (preempt_trace(preempt_count()) && !irq_trace())
- start_critical_timing(a0, a1);
+ int pc = preempt_count();
+
+ if (preempt_trace(pc) && !irq_trace())
+ start_critical_timing(a0, a1, pc);
}
static int preemptoff_tracer_init(struct trace_array *tr)
--
Gitblit v1.6.2