From 9999e48639b3cecb08ffb37358bcba3b48161b29 Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Fri, 10 May 2024 08:50:17 +0000
Subject: [PATCH] trace_sched_wakeup: pass flags and pc instead of packed trace_ctx
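
Convert kernel/trace/trace_sched_wakeup.c back from the single packed
trace_ctx argument to the older split flags/pc interface: the wakeup
tracer entry points once again sample the preempt count with
preempt_count() and the interrupt flags with local_save_flags(), and
pass the two values separately into __trace_function(),
tracing_sched_switch_trace(), tracing_sched_wakeup_trace() and
__trace_stack() instead of one tracing_gen_ctx() word. For this file,
that effectively undoes the upstream consolidation that introduced
tracing_gen_ctx() ("tracing: Merge irqflags + preempt counter").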
---
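Reviewer note: the trace_ctx word that these hunks unpack back into
flags/pc is, upstream, a single unsigned int carrying both the
TRACE_FLAG_* irq bits and the preempt count. A minimal userspace sketch
of that packing follows; the demo_* names and the exact bit positions
are illustrative assumptions, not the kernel's definition:

    #include <stdio.h>

    /* Illustrative flag bits; the kernel's TRACE_FLAG_* values differ. */
    enum { DEMO_FLAG_IRQS_OFF = 0x01, DEMO_FLAG_NEED_RESCHED = 0x02 };

    /* Pack irq-flag bits and preempt count into one word (assumed layout). */
    static unsigned int demo_gen_ctx(unsigned int flag_bits, unsigned int pc)
    {
        return (flag_bits << 16) | (pc & 0xffu);
    }

    int main(void)
    {
        unsigned int ctx = demo_gen_ctx(DEMO_FLAG_IRQS_OFF, 3);

        /* A consumer recovers the two halves this patch keeps separate. */
        printf("flags=%#x pc=%u\n", ctx >> 16, ctx & 0xffu);
        return 0;
    }
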
kernel/kernel/trace/trace_sched_wakeup.c | 73 ++++++++++++++++++++----------------
 1 file changed, 40 insertions(+), 33 deletions(-)
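
The func_prolog_preempt_disable() helper that most hunks call into (and
the open-coded atomic_inc_return() in the probes below) is a recursion
guard: each CPU keeps a "disabled" depth counter, and a probe records
only when its own increment took that counter from 0 to 1. A
single-threaded userspace sketch of the guard; all demo_* names are
ours, not the kernel's:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_long demo_disabled = 0;  /* stand-in for data->disabled */

    static void demo_probe(const char *what)
    {
        /* Mirrors "if (atomic_inc_return(&data->disabled) != 1) goto out". */
        if (atomic_fetch_add(&demo_disabled, 1) + 1 == 1)
            printf("traced: %s\n", what);
        atomic_fetch_sub(&demo_disabled, 1);
    }

    int main(void)
    {
        demo_probe("outer");                  /* records */

        atomic_fetch_add(&demo_disabled, 1);  /* pretend we are nested */
        demo_probe("nested");                 /* suppressed */
        atomic_fetch_sub(&demo_disabled, 1);
        return 0;
    }
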
diff --git a/kernel/kernel/trace/trace_sched_wakeup.c b/kernel/kernel/trace/trace_sched_wakeup.c
index f1c6033..037e1e8 100644
--- a/kernel/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/kernel/trace/trace_sched_wakeup.c
@@ -67,7 +67,7 @@
static int
func_prolog_preempt_disable(struct trace_array *tr,
struct trace_array_cpu **data,
- unsigned int *trace_ctx)
+ int *pc)
{
long disabled;
int cpu;
@@ -75,7 +75,7 @@
if (likely(!wakeup_task))
return 0;
- *trace_ctx = tracing_gen_ctx();
+ *pc = preempt_count();
preempt_disable_notrace();
cpu = raw_smp_processor_id();
@@ -116,8 +116,8 @@
{
struct trace_array *tr = wakeup_trace;
struct trace_array_cpu *data;
- unsigned int trace_ctx;
- int ret = 0;
+ unsigned long flags;
+ int pc, ret = 0;
if (ftrace_graph_ignore_func(trace))
return 0;
@@ -131,10 +131,11 @@
if (ftrace_graph_notrace_addr(trace->func))
return 1;
- if (!func_prolog_preempt_disable(tr, &data, &trace_ctx))
+ if (!func_prolog_preempt_disable(tr, &data, &pc))
return 0;
- ret = __trace_graph_entry(tr, trace, trace_ctx);
+ local_save_flags(flags);
+ ret = __trace_graph_entry(tr, trace, flags, pc);
atomic_dec(&data->disabled);
preempt_enable_notrace();
@@ -145,14 +146,16 @@
{
struct trace_array *tr = wakeup_trace;
struct trace_array_cpu *data;
- unsigned int trace_ctx;
+ unsigned long flags;
+ int pc;
ftrace_graph_addr_finish(trace);
- if (!func_prolog_preempt_disable(tr, &data, &trace_ctx))
+ if (!func_prolog_preempt_disable(tr, &data, &pc))
return;
- __trace_graph_return(tr, trace, trace_ctx);
+ local_save_flags(flags);
+ __trace_graph_return(tr, trace, flags, pc);
atomic_dec(&data->disabled);
preempt_enable_notrace();
@@ -168,6 +171,8 @@
{
if (is_graph(iter->tr))
graph_trace_open(iter);
+ else
+ iter->private = NULL;
}
static void wakeup_trace_close(struct trace_iterator *iter)
@@ -214,13 +219,13 @@
struct trace_array *tr = wakeup_trace;
struct trace_array_cpu *data;
unsigned long flags;
- unsigned int trace_ctx;
+ int pc;
- if (!func_prolog_preempt_disable(tr, &data, &trace_ctx))
+ if (!func_prolog_preempt_disable(tr, &data, &pc))
return;
local_irq_save(flags);
- trace_function(tr, ip, parent_ip, trace_ctx);
+ trace_function(tr, ip, parent_ip, flags, pc);
local_irq_restore(flags);
atomic_dec(&data->disabled);
@@ -300,12 +305,12 @@
static void
__trace_function(struct trace_array *tr,
unsigned long ip, unsigned long parent_ip,
- unsigned int trace_ctx)
+ unsigned long flags, int pc)
{
if (is_graph(tr))
- trace_graph_function(tr, ip, parent_ip, trace_ctx);
+ trace_graph_function(tr, ip, parent_ip, flags, pc);
else
- trace_function(tr, ip, parent_ip, trace_ctx);
+ trace_function(tr, ip, parent_ip, flags, pc);
}
static int wakeup_flag_changed(struct trace_array *tr, u32 mask, int set)
@@ -372,7 +377,7 @@
tracing_sched_switch_trace(struct trace_array *tr,
struct task_struct *prev,
struct task_struct *next,
- unsigned int trace_ctx)
+ unsigned long flags, int pc)
{
struct trace_event_call *call = &event_context_switch;
struct trace_buffer *buffer = tr->array_buffer.buffer;
@@ -380,7 +385,7 @@
struct ctx_switch_entry *entry;
event = trace_buffer_lock_reserve(buffer, TRACE_CTX,
- sizeof(*entry), trace_ctx);
+ sizeof(*entry), flags, pc);
if (!event)
return;
entry = ring_buffer_event_data(event);
@@ -393,14 +398,14 @@
entry->next_cpu = task_cpu(next);
if (!call_filter_check_discard(call, entry, buffer, event))
- trace_buffer_unlock_commit(tr, buffer, event, trace_ctx);
+ trace_buffer_unlock_commit(tr, buffer, event, flags, pc);
}
static void
tracing_sched_wakeup_trace(struct trace_array *tr,
struct task_struct *wakee,
struct task_struct *curr,
- unsigned int trace_ctx)
+ unsigned long flags, int pc)
{
struct trace_event_call *call = &event_wakeup;
struct ring_buffer_event *event;
@@ -408,7 +413,7 @@
struct trace_buffer *buffer = tr->array_buffer.buffer;
event = trace_buffer_lock_reserve(buffer, TRACE_WAKE,
- sizeof(*entry), trace_ctx);
+ sizeof(*entry), flags, pc);
if (!event)
return;
entry = ring_buffer_event_data(event);
@@ -421,7 +426,7 @@
entry->next_cpu = task_cpu(wakee);
if (!call_filter_check_discard(call, entry, buffer, event))
- trace_buffer_unlock_commit(tr, buffer, event, trace_ctx);
+ trace_buffer_unlock_commit(tr, buffer, event, flags, pc);
}
static void notrace
@@ -433,7 +438,7 @@
unsigned long flags;
long disabled;
int cpu;
- unsigned int trace_ctx;
+ int pc;
tracing_record_cmdline(prev);
@@ -452,6 +457,8 @@
if (next != wakeup_task)
return;
+ pc = preempt_count();
+
/* disable local data, not wakeup_cpu data */
cpu = raw_smp_processor_id();
disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled);
@@ -459,8 +466,6 @@
goto out;
local_irq_save(flags);
- trace_ctx = tracing_gen_ctx_flags(flags);
-
arch_spin_lock(&wakeup_lock);
/* We could race with grabbing wakeup_lock */
@@ -470,9 +475,9 @@
/* The task we are waiting for is waking up */
data = per_cpu_ptr(wakeup_trace->array_buffer.data, wakeup_cpu);
- __trace_function(wakeup_trace, CALLER_ADDR0, CALLER_ADDR1, trace_ctx);
- tracing_sched_switch_trace(wakeup_trace, prev, next, trace_ctx);
- __trace_stack(wakeup_trace, trace_ctx, 0);
+ __trace_function(wakeup_trace, CALLER_ADDR0, CALLER_ADDR1, flags, pc);
+ tracing_sched_switch_trace(wakeup_trace, prev, next, flags, pc);
+ __trace_stack(wakeup_trace, flags, 0, pc);
T0 = data->preempt_timestamp;
T1 = ftrace_now(cpu);
@@ -524,8 +529,9 @@
{
struct trace_array_cpu *data;
int cpu = smp_processor_id();
+ unsigned long flags;
long disabled;
- unsigned int trace_ctx;
+ int pc;
if (likely(!tracer_enabled))
return;
@@ -546,11 +552,10 @@
(!dl_task(p) && (p->prio >= wakeup_prio || p->prio >= current->prio)))
return;
+ pc = preempt_count();
disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled);
if (unlikely(disabled != 1))
goto out;
-
- trace_ctx = tracing_gen_ctx();
/* interrupts should be off from try_to_wake_up */
arch_spin_lock(&wakeup_lock);
@@ -578,17 +583,19 @@
wakeup_task = get_task_struct(p);
+ local_save_flags(flags);
+
data = per_cpu_ptr(wakeup_trace->array_buffer.data, wakeup_cpu);
data->preempt_timestamp = ftrace_now(cpu);
- tracing_sched_wakeup_trace(wakeup_trace, p, current, trace_ctx);
- __trace_stack(wakeup_trace, trace_ctx, 0);
+ tracing_sched_wakeup_trace(wakeup_trace, p, current, flags, pc);
+ __trace_stack(wakeup_trace, flags, 0, pc);
/*
* We must be careful in using CALLER_ADDR2. But since wake_up
* is not called by an assembly function (where as schedule is)
* it should be safe to use it here.
*/
- __trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, trace_ctx);
+ __trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc);
out_locked:
arch_spin_unlock(&wakeup_lock);
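
One ordering detail the restored code keeps: func_prolog_preempt_disable()
reads *pc = preempt_count() before calling preempt_disable_notrace(), so
the recorded count does not include the tracer's own preempt-disable. A
toy model of why that order matters (demo_* names are ours):

    #include <stdio.h>

    static unsigned int demo_preempt_count;  /* stand-in for preempt_count() */

    static void demo_preempt_disable(void) { demo_preempt_count++; }
    static void demo_preempt_enable(void)  { demo_preempt_count--; }

    int main(void)
    {
        /* Sample first, as the prolog does... */
        unsigned int pc = demo_preempt_count;

        demo_preempt_disable();
        /* ...so the event records the caller's count, not the tracer's. */
        printf("recorded pc=%u, live count=%u\n", pc, demo_preempt_count);
        demo_preempt_enable();
        return 0;
    }
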
--
Gitblit v1.6.2