@@ -96,7 +96,8 @@
 
 int __trace_graph_entry(struct trace_array *tr,
 				struct ftrace_graph_ent *trace,
-				unsigned int trace_ctx)
+				unsigned long flags,
+				int pc)
 {
 	struct trace_event_call *call = &event_funcgraph_entry;
 	struct ring_buffer_event *event;
@@ -104,7 +105,7 @@
 	struct ftrace_graph_ent_entry *entry;
 
 	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
-					  sizeof(*entry), trace_ctx);
+					  sizeof(*entry), flags, pc);
 	if (!event)
 		return 0;
 	entry = ring_buffer_event_data(event);
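With this change, __trace_graph_entry() takes the raw interrupt flags and the preempt count as two separate arguments instead of one pre-packed trace_ctx word, and hands both straight to trace_buffer_lock_reserve(). A minimal sketch of a call site under that convention; it assumes the caller already has valid tr and trace pointers and uses only helpers that appear elsewhere in this diff (preempt_count(), local_irq_restore()) plus local_irq_save():

	unsigned long flags;
	int pc;
	int ret;

	local_irq_save(flags);		/* irq state goes into the common event fields */
	pc = preempt_count();		/* preemption context at the entry point */
	ret = __trace_graph_entry(tr, trace, flags, pc);
	local_irq_restore(flags);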
@@ -128,10 +129,10 @@
 	struct trace_array *tr = graph_array;
 	struct trace_array_cpu *data;
 	unsigned long flags;
-	unsigned int trace_ctx;
 	long disabled;
 	int ret;
 	int cpu;
+	int pc;
 
 	if (trace_recursion_test(TRACE_GRAPH_NOTRACE_BIT))
 		return 0;
@@ -173,8 +174,8 @@
 	data = per_cpu_ptr(tr->array_buffer.data, cpu);
 	disabled = atomic_inc_return(&data->disabled);
 	if (likely(disabled == 1)) {
-		trace_ctx = tracing_gen_ctx_flags(flags);
-		ret = __trace_graph_entry(tr, trace, trace_ctx);
+		pc = preempt_count();
+		ret = __trace_graph_entry(tr, trace, flags, pc);
 	} else {
 		ret = 0;
 	}
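The hunk above only shows the lines that change inside trace_graph_entry(); the surrounding guard is elided. For orientation, a condensed sketch of the whole guarded-emit pattern as it reads after this change. The local_irq_save() and raw_smp_processor_id() lines are assumed from the surrounding file and are not part of these hunks; error handling and ftrace address filtering are omitted:

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->array_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		pc = preempt_count();		/* only the outermost, non-nested call emits */
		ret = __trace_graph_entry(tr, trace, flags, pc);
	} else {
		ret = 0;			/* nested call on this CPU: drop the event */
	}
	atomic_dec(&data->disabled);
	local_irq_restore(flags);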
@@ -187,7 +188,7 @@
 
 static void
 __trace_graph_function(struct trace_array *tr,
-		unsigned long ip, unsigned int trace_ctx)
+		unsigned long ip, unsigned long flags, int pc)
 {
 	u64 time = trace_clock_local();
 	struct ftrace_graph_ent ent = {
@@ -201,21 +202,22 @@
 		.rettime = time,
 	};
 
-	__trace_graph_entry(tr, &ent, trace_ctx);
-	__trace_graph_return(tr, &ret, trace_ctx);
+	__trace_graph_entry(tr, &ent, flags, pc);
+	__trace_graph_return(tr, &ret, flags, pc);
 }
 
 void
 trace_graph_function(struct trace_array *tr,
 		unsigned long ip, unsigned long parent_ip,
-		unsigned int trace_ctx)
+		unsigned long flags, int pc)
 {
-	__trace_graph_function(tr, ip, trace_ctx);
+	__trace_graph_function(tr, ip, flags, pc);
 }
 
 void __trace_graph_return(struct trace_array *tr,
 				struct ftrace_graph_ret *trace,
-				unsigned int trace_ctx)
+				unsigned long flags,
+				int pc)
 {
 	struct trace_event_call *call = &event_funcgraph_exit;
 	struct ring_buffer_event *event;
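trace_graph_function() above is the entry point other tracers use to record a one-off entry/return pair for a single address, and under this convention the caller supplies flags and pc itself. A hedged sketch of such a call site; the irqsoff and wakeup tracers are believed to be the in-tree users, and capturing flags with local_save_flags() is an assumption about the caller, not something shown in this diff:

	unsigned long flags;
	int pc;

	local_save_flags(flags);	/* assumed: caller records its own irq state */
	pc = preempt_count();
	trace_graph_function(tr, ip, parent_ip, flags, pc);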
@@ -223,7 +225,7 @@
 	struct ftrace_graph_ret_entry *entry;
 
 	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
-					  sizeof(*entry), trace_ctx);
+					  sizeof(*entry), flags, pc);
 	if (!event)
 		return;
 	entry = ring_buffer_event_data(event);
@@ -237,9 +239,9 @@
 	struct trace_array *tr = graph_array;
 	struct trace_array_cpu *data;
 	unsigned long flags;
-	unsigned int trace_ctx;
 	long disabled;
 	int cpu;
+	int pc;
 
 	ftrace_graph_addr_finish(trace);
 
@@ -253,8 +255,8 @@
 	data = per_cpu_ptr(tr->array_buffer.data, cpu);
 	disabled = atomic_inc_return(&data->disabled);
 	if (likely(disabled == 1)) {
-		trace_ctx = tracing_gen_ctx_flags(flags);
-		__trace_graph_return(tr, trace, trace_ctx);
+		pc = preempt_count();
+		__trace_graph_return(tr, trace, flags, pc);
 	}
 	atomic_dec(&data->disabled);
 	local_irq_restore(flags);
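For contrast, the convention on the removed side of every hunk above packs the same information into a single word at the call site: tracing_gen_ctx_flags(flags) folds the interrupt flags and the current preempt count into one unsigned int trace_ctx, which is then the only context argument passed down. A sketch of that shape, matching the removed lines of the last hunk:

	unsigned int trace_ctx;

	trace_ctx = tracing_gen_ctx_flags(flags);	/* irqflags + preempt count in one word */
	__trace_graph_return(tr, trace, trace_ctx);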