..
 #include <asm/stacktrace.h>
 #include <asm/unwind.h>
 
-static int save_stack_address(struct stack_trace *trace, unsigned long addr,
-                              bool nosched)
-{
-        if (nosched && in_sched_functions(addr))
-                return 0;
-
-        if (trace->skip > 0) {
-                trace->skip--;
-                return 0;
-        }
-
-        if (trace->nr_entries >= trace->max_entries)
-                return -1;
-
-        trace->entries[trace->nr_entries++] = addr;
-        return 0;
-}
-
-static void noinline __save_stack_trace(struct stack_trace *trace,
-                        struct task_struct *task, struct pt_regs *regs,
-                        bool nosched)
+void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
+                     struct task_struct *task, struct pt_regs *regs)
 {
         struct unwind_state state;
         unsigned long addr;
 
-        if (regs)
-                save_stack_address(trace, regs->ip, nosched);
+        if (regs && !consume_entry(cookie, regs->ip))
+                return;
 
         for (unwind_start(&state, task, regs, NULL); !unwind_done(&state);
              unwind_next_frame(&state)) {
                 addr = unwind_get_return_address(&state);
-                if (!addr || save_stack_address(trace, addr, nosched))
+                if (!addr || !consume_entry(cookie, addr))
                         break;
         }
-
-        if (trace->nr_entries < trace->max_entries)
-                trace->entries[trace->nr_entries++] = ULONG_MAX;
 }
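The rewritten arch_stack_walk() no longer fills a struct stack_trace; it hands each return address to a stack_trace_consume_fn callback and stops as soon as that callback returns false. A minimal sketch of a consumer, assuming only the callback signature visible in the diff (the trace_buf type and store_entry() are illustrative names, not the in-tree generic code in kernel/stacktrace.c):

/* Illustrative consumer for arch_stack_walk(); names are hypothetical. */
struct trace_buf {
        unsigned long   *entries;       /* caller-supplied storage */
        unsigned int    max, nr;
};

static bool store_entry(void *cookie, unsigned long addr)
{
        struct trace_buf *buf = cookie;

        if (buf->nr >= buf->max)
                return false;           /* returning false stops the walk */
        buf->entries[buf->nr++] = addr;
        return true;
}

A kernel-stack walk of the current task is then arch_stack_walk(store_entry, &buf, current, NULL). The skip and max_entries bookkeeping that the deleted save_stack_address() carried moves entirely to the consumer side, and the ULONG_MAX end marker is dropped rather than relocated.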
 
 /*
- * Save stack-backtrace addresses into a stack_trace buffer.
+ * This function returns an error if it detects any unreliable features of the
+ * stack. Otherwise it guarantees that the stack trace is reliable.
+ *
+ * If the task is not 'current', the caller *must* ensure the task is inactive.
  */
-void save_stack_trace(struct stack_trace *trace)
-{
-        trace->skip++;
-        __save_stack_trace(trace, current, NULL, false);
-}
-EXPORT_SYMBOL_GPL(save_stack_trace);
-
-void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
-{
-        __save_stack_trace(trace, current, regs, false);
-}
-
-void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
-{
-        if (!try_get_task_stack(tsk))
-                return;
-
-        if (tsk == current)
-                trace->skip++;
-        __save_stack_trace(trace, tsk, NULL, true);
-
-        put_task_stack(tsk);
-}
-EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
-
-#ifdef CONFIG_HAVE_RELIABLE_STACKTRACE
-
-static int __always_inline
-__save_stack_trace_reliable(struct stack_trace *trace,
-                            struct task_struct *task)
+int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
+                             void *cookie, struct task_struct *task)
 {
         struct unwind_state state;
         struct pt_regs *regs;
..
                 if (regs) {
                         /* Success path for user tasks */
                         if (user_mode(regs))
-                                goto success;
+                                return 0;
 
                         /*
                          * Kernel mode registers on the stack indicate an
..
                          * or a page fault), which can make frame pointers
                          * unreliable.
                          */
-
                         if (IS_ENABLED(CONFIG_FRAME_POINTER))
                                 return -EINVAL;
                 }
..
                 if (!addr)
                         return -EINVAL;
 
-                if (save_stack_address(trace, addr, false))
+                if (!consume_entry(cookie, addr))
                         return -EINVAL;
         }
..
         if (unwind_error(&state))
                 return -EINVAL;
 
-        /* Success path for non-user tasks, i.e. kthreads and idle tasks */
-        if (!(task->flags & (PF_KTHREAD | PF_IDLE)))
-                return -EINVAL;
-
-success:
-        if (trace->nr_entries < trace->max_entries)
-                trace->entries[trace->nr_entries++] = ULONG_MAX;
-
         return 0;
 }
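arch_stack_walk_reliable() keeps the semantics spelled out in the comment above: a zero return guarantees every consumed entry is trustworthy, and any error means the stack must not be trusted. The deleted save_stack_trace_tsk_reliable() wrapper below shows the calling convention it expects; here is a sketch of an equivalent caller on the new interface, reusing store_entry() from the earlier sketch (reliable_trace_task() is a hypothetical name):

static int reliable_trace_task(struct task_struct *task, struct trace_buf *buf)
{
        int ret;

        /*
         * If the task doesn't have a stack (e.g., a zombie), the stack
         * is "reliably" empty, matching the removed wrapper's convention.
         */
        if (!try_get_task_stack(task))
                return 0;

        ret = arch_stack_walk_reliable(store_entry, buf, task);

        put_task_stack(task);

        return ret;
}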
-
-/*
- * This function returns an error if it detects any unreliable features of the
- * stack. Otherwise it guarantees that the stack trace is reliable.
- *
- * If the task is not 'current', the caller *must* ensure the task is inactive.
- */
-int save_stack_trace_tsk_reliable(struct task_struct *tsk,
-                                  struct stack_trace *trace)
-{
-        int ret;
-
-        /*
-         * If the task doesn't have a stack (e.g., a zombie), the stack is
-         * "reliably" empty.
-         */
-        if (!try_get_task_stack(tsk))
-                return 0;
-
-        ret = __save_stack_trace_reliable(trace, tsk);
-
-        put_task_stack(tsk);
-
-        return ret;
-}
-#endif /* CONFIG_HAVE_RELIABLE_STACKTRACE */
 
 /* Userspace stacktrace - based on kernel/trace/trace_sysprof.c */
 
..
 };
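The collapsed context just above ends with the closing brace of struct stack_frame_user; its declaration is hidden behind the `..` elision. Inferring from the field accesses in copy_stack_frame() below, it plausibly looks like this (a reconstruction for readability, not the verbatim hidden lines):

struct stack_frame_user {
        const void __user       *next_fp;       /* saved frame pointer of the caller */
        unsigned long           ret_addr;       /* return address pushed by the call */
};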
 
 static int
-copy_stack_frame(const void __user *fp, struct stack_frame_user *frame)
+copy_stack_frame(const struct stack_frame_user __user *fp,
+                 struct stack_frame_user *frame)
 {
         int ret;
 
-        if (!access_ok(VERIFY_READ, fp, sizeof(*frame)))
+        if (__range_not_ok(fp, sizeof(*frame), TASK_SIZE))
                 return 0;
 
         ret = 1;
         pagefault_disable();
-        if (__copy_from_user_inatomic(frame, fp, sizeof(*frame)))
+        if (__get_user(frame->next_fp, &fp->next_fp) ||
+            __get_user(frame->ret_addr, &fp->ret_addr))
                 ret = 0;
         pagefault_enable();
 
         return ret;
 }
 
-static inline void __save_stack_trace_user(struct stack_trace *trace)
+void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
+                          const struct pt_regs *regs)
 {
-        const struct pt_regs *regs = task_pt_regs(current);
         const void __user *fp = (const void __user *)regs->bp;
 
-        if (trace->nr_entries < trace->max_entries)
-                trace->entries[trace->nr_entries++] = regs->ip;
+        if (!consume_entry(cookie, regs->ip))
+                return;
 
-        while (trace->nr_entries < trace->max_entries) {
+        while (1) {
                 struct stack_frame_user frame;
 
                 frame.next_fp = NULL;
..
                         break;
                 if ((unsigned long)fp < regs->sp)
                         break;
-                if (frame.ret_addr) {
-                        trace->entries[trace->nr_entries++] =
-                                frame.ret_addr;
-                }
-                if (fp == frame.next_fp)
+                if (!frame.ret_addr)
+                        break;
+                if (!consume_entry(cookie, frame.ret_addr))
                         break;
                 fp = frame.next_fp;
         }
 }
 
-void save_stack_trace_user(struct stack_trace *trace)
-{
-        /*
-         * Trace user stack if we are not a kernel thread
-         */
-        if (current->mm) {
-                __save_stack_trace_user(trace);
-        }
-        if (trace->nr_entries < trace->max_entries)
-                trace->entries[trace->nr_entries++] = ULONG_MAX;
-}
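arch_stack_walk_user() now receives the pt_regs explicitly instead of reading task_pt_regs(current) itself, and the kernel-thread guard visible in the deleted save_stack_trace_user() above moves to the generic caller. A sketch of equivalent caller-side code under those assumptions, again reusing the illustrative store_entry()/trace_buf pair:

static void trace_user_stack(struct trace_buf *buf)
{
        /* Kernel threads have no user stack to walk. */
        if (!current->mm)
                return;

        arch_stack_walk_user(store_entry, buf, task_pt_regs(current));
}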
---|