hc
2023-12-11 6778948f9de86c3cfaf36725a7c87dcff9ba247f
kernel/kernel/trace/trace_functions_graph.c
@@ -96,7 +96,8 @@
 
 int __trace_graph_entry(struct trace_array *tr,
 				struct ftrace_graph_ent *trace,
-				unsigned int trace_ctx)
+				unsigned long flags,
+				int pc)
 {
 	struct trace_event_call *call = &event_funcgraph_entry;
 	struct ring_buffer_event *event;
@@ -104,7 +105,7 @@
 	struct ftrace_graph_ent_entry *entry;
 
 	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
-					  sizeof(*entry), trace_ctx);
+					  sizeof(*entry), flags, pc);
 	if (!event)
 		return 0;
 	entry = ring_buffer_event_data(event);
@@ -128,10 +129,10 @@
 	struct trace_array *tr = graph_array;
 	struct trace_array_cpu *data;
 	unsigned long flags;
-	unsigned int trace_ctx;
 	long disabled;
 	int ret;
 	int cpu;
+	int pc;
 
 	if (trace_recursion_test(TRACE_GRAPH_NOTRACE_BIT))
 		return 0;
@@ -173,8 +174,8 @@
 	data = per_cpu_ptr(tr->array_buffer.data, cpu);
 	disabled = atomic_inc_return(&data->disabled);
 	if (likely(disabled == 1)) {
-		trace_ctx = tracing_gen_ctx_flags(flags);
-		ret = __trace_graph_entry(tr, trace, trace_ctx);
+		pc = preempt_count();
+		ret = __trace_graph_entry(tr, trace, flags, pc);
 	} else {
 		ret = 0;
 	}
@@ -187,7 +188,7 @@
 
 static void
 __trace_graph_function(struct trace_array *tr,
-		unsigned long ip, unsigned int trace_ctx)
+		unsigned long ip, unsigned long flags, int pc)
 {
 	u64 time = trace_clock_local();
 	struct ftrace_graph_ent ent = {
@@ -201,21 +202,22 @@
 		.rettime = time,
 	};
 
-	__trace_graph_entry(tr, &ent, trace_ctx);
-	__trace_graph_return(tr, &ret, trace_ctx);
+	__trace_graph_entry(tr, &ent, flags, pc);
+	__trace_graph_return(tr, &ret, flags, pc);
 }
 
 void
 trace_graph_function(struct trace_array *tr,
 		unsigned long ip, unsigned long parent_ip,
-		unsigned int trace_ctx)
+		unsigned long flags, int pc)
 {
-	__trace_graph_function(tr, ip, trace_ctx);
+	__trace_graph_function(tr, ip, flags, pc);
}
 
 void __trace_graph_return(struct trace_array *tr,
 				struct ftrace_graph_ret *trace,
-				unsigned int trace_ctx)
+				unsigned long flags,
+				int pc)
 {
 	struct trace_event_call *call = &event_funcgraph_exit;
 	struct ring_buffer_event *event;
@@ -223,7 +225,7 @@
 	struct ftrace_graph_ret_entry *entry;
 
 	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
-					  sizeof(*entry), trace_ctx);
+					  sizeof(*entry), flags, pc);
 	if (!event)
 		return;
 	entry = ring_buffer_event_data(event);
@@ -237,9 +239,9 @@
 	struct trace_array *tr = graph_array;
 	struct trace_array_cpu *data;
 	unsigned long flags;
-	unsigned int trace_ctx;
 	long disabled;
 	int cpu;
+	int pc;
 
 	ftrace_graph_addr_finish(trace);
 
@@ -253,8 +255,8 @@
 	data = per_cpu_ptr(tr->array_buffer.data, cpu);
 	disabled = atomic_inc_return(&data->disabled);
 	if (likely(disabled == 1)) {
-		trace_ctx = tracing_gen_ctx_flags(flags);
-		__trace_graph_return(tr, trace, trace_ctx);
+		pc = preempt_count();
+		__trace_graph_return(tr, trace, flags, pc);
 	}
 	atomic_dec(&data->disabled);
 	local_irq_restore(flags);