@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Stack tracing support
  *
  * Copyright (C) 2012 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
  */
 #include <linux/kernel.h>
 #include <linux/export.h>
@@ -25,6 +14,7 @@
 #include <linux/stacktrace.h>
 
 #include <asm/irq.h>
+#include <asm/pointer_auth.h>
 #include <asm/stack_pointer.h>
 #include <asm/stacktrace.h>
 
@@ -41,9 +31,18 @@
  * ldp x29, x30, [sp]
  * add sp, sp, #0x10
  */
+
+/*
+ * Unwind from one frame record (A) to the next frame record (B).
+ *
+ * We terminate early if the location of B indicates a malformed chain of frame
+ * records (e.g. a cycle), determined based on the location and fp value of A
+ * and the location (but not the fp value) of B.
+ */
 int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
 {
         unsigned long fp = frame->fp;
+        struct stack_info info;
 
         if (fp & 0xf)
                 return -EINVAL;
@@ -51,29 +50,59 @@
         if (!tsk)
                 tsk = current;
 
-        if (!on_accessible_stack(tsk, fp, NULL))
+        if (!on_accessible_stack(tsk, fp, &info))
                 return -EINVAL;
 
+        if (test_bit(info.type, frame->stacks_done))
+                return -EINVAL;
+
+        /*
+         * As stacks grow downward, any valid record on the same stack must be
+         * at a strictly higher address than the prior record.
+         *
+         * Stacks can nest in several valid orders, e.g.
+         *
+         * TASK -> IRQ -> OVERFLOW -> SDEI_NORMAL
+         * TASK -> SDEI_NORMAL -> SDEI_CRITICAL -> OVERFLOW
+         *
+         * ... but the nesting itself is strict. Once we transition from one
+         * stack to another, it's never valid to unwind back to that first
+         * stack.
+         */
+        if (info.type == frame->prev_type) {
+                if (fp <= frame->prev_fp)
+                        return -EINVAL;
+        } else {
+                set_bit(frame->prev_type, frame->stacks_done);
+        }
+
+        /*
+         * Record this frame record's values and location. The prev_fp and
+         * prev_type are only meaningful to the next unwind_frame() invocation.
+         */
         frame->fp = READ_ONCE_NOCHECK(*(unsigned long *)(fp));
         frame->pc = READ_ONCE_NOCHECK(*(unsigned long *)(fp + 8));
+        frame->prev_fp = fp;
+        frame->prev_type = info.type;
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
         if (tsk->ret_stack &&
-            (frame->pc == (unsigned long)return_to_handler)) {
-                if (WARN_ON_ONCE(frame->graph == -1))
-                        return -EINVAL;
-                if (frame->graph < -1)
-                        frame->graph += FTRACE_NOTRACE_DEPTH;
-
+            (ptrauth_strip_insn_pac(frame->pc) == (unsigned long)return_to_handler)) {
+                struct ftrace_ret_stack *ret_stack;
                 /*
                  * This is a case where function graph tracer has
                  * modified a return address (LR) in a stack frame
                  * to hook a function return.
                  * So replace it to an original value.
                  */
-                frame->pc = tsk->ret_stack[frame->graph--].ret;
+                ret_stack = ftrace_graph_get_ret_stack(tsk, frame->graph++);
+                if (WARN_ON_ONCE(!ret_stack))
+                        return -EINVAL;
+                frame->pc = ret_stack->ret;
         }
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
+        frame->pc = ptrauth_strip_insn_pac(frame->pc);
 
         /*
          * Frames created upon entry from EL0 have NULL FP and PC values, so
@@ -89,12 +118,12 @@
 NOKPROBE_SYMBOL(unwind_frame);
 
 void notrace walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
-                             int (*fn)(struct stackframe *, void *), void *data)
+                             bool (*fn)(void *, unsigned long), void *data)
 {
         while (1) {
                 int ret;
 
-                if (fn(frame, data))
+                if (!fn(data, frame->pc))
                         break;
                 ret = unwind_frame(tsk, frame);
                 if (ret < 0)
@@ -103,94 +132,91 @@
 }
 NOKPROBE_SYMBOL(walk_stackframe);
 
-#ifdef CONFIG_STACKTRACE
-struct stack_trace_data {
-        struct stack_trace *trace;
-        unsigned int no_sched_functions;
-        unsigned int skip;
-};
-
-static int save_trace(struct stackframe *frame, void *d)
+static void dump_backtrace_entry(unsigned long where, const char *loglvl)
 {
-        struct stack_trace_data *data = d;
-        struct stack_trace *trace = data->trace;
-        unsigned long addr = frame->pc;
+        printk("%s %pS\n", loglvl, (void *)where);
+}
 
-        if (data->no_sched_functions && in_sched_functions(addr))
-                return 0;
-        if (data->skip) {
-                data->skip--;
-                return 0;
+void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
+                    const char *loglvl)
+{
+        struct stackframe frame;
+        int skip = 0;
+
+        pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);
+
+        if (regs) {
+                if (user_mode(regs))
+                        return;
+                skip = 1;
         }
 
-        trace->entries[trace->nr_entries++] = addr;
-
-        return trace->nr_entries >= trace->max_entries;
-}
-
-void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
-{
-        struct stack_trace_data data;
-        struct stackframe frame;
-
-        data.trace = trace;
-        data.skip = trace->skip;
-        data.no_sched_functions = 0;
-
-        frame.fp = regs->regs[29];
-        frame.pc = regs->pc;
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-        frame.graph = current->curr_ret_stack;
-#endif
-
-        walk_stackframe(current, &frame, save_trace, &data);
-        if (trace->nr_entries < trace->max_entries)
-                trace->entries[trace->nr_entries++] = ULONG_MAX;
-}
-
-static noinline void __save_stack_trace(struct task_struct *tsk,
-        struct stack_trace *trace, unsigned int nosched)
-{
-        struct stack_trace_data data;
-        struct stackframe frame;
+        if (!tsk)
+                tsk = current;
 
         if (!try_get_task_stack(tsk))
                 return;
 
-        data.trace = trace;
-        data.skip = trace->skip;
-        data.no_sched_functions = nosched;
-
-        if (tsk != current) {
-                frame.fp = thread_saved_fp(tsk);
-                frame.pc = thread_saved_pc(tsk);
+        if (tsk == current) {
+                start_backtrace(&frame,
+                                (unsigned long)__builtin_frame_address(0),
+                                (unsigned long)dump_backtrace);
         } else {
-                /* We don't want this function nor the caller */
-                data.skip += 2;
-                frame.fp = (unsigned long)__builtin_frame_address(0);
-                frame.pc = (unsigned long)__save_stack_trace;
+                /*
+                 * task blocked in __switch_to
+                 */
+                start_backtrace(&frame,
+                                thread_saved_fp(tsk),
+                                thread_saved_pc(tsk));
         }
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-        frame.graph = tsk->curr_ret_stack;
-#endif
 
-        walk_stackframe(tsk, &frame, save_trace, &data);
-        if (trace->nr_entries < trace->max_entries)
-                trace->entries[trace->nr_entries++] = ULONG_MAX;
+        printk("%sCall trace:\n", loglvl);
+        do {
+                /* skip until specified stack frame */
+                if (!skip) {
+                        dump_backtrace_entry(frame.pc, loglvl);
+                } else if (frame.fp == regs->regs[29]) {
+                        skip = 0;
+                        /*
+                         * Mostly, this is the case where this function is
+                         * called in panic/abort. As exception handler's
+                         * stack frame does not contain the corresponding pc
+                         * at which an exception has taken place, use regs->pc
+                         * instead.
+                         */
+                        dump_backtrace_entry(regs->pc, loglvl);
+                }
        } while (!unwind_frame(tsk, &frame));
 
         put_task_stack(tsk);
 }
-EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
+EXPORT_SYMBOL_GPL(dump_backtrace);
 
-void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
+void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
 {
-        __save_stack_trace(tsk, trace, 1);
+        dump_backtrace(NULL, tsk, loglvl);
+        barrier();
 }
 
-void save_stack_trace(struct stack_trace *trace)
+#ifdef CONFIG_STACKTRACE
+
+noinline notrace void arch_stack_walk(stack_trace_consume_fn consume_entry,
+                                      void *cookie, struct task_struct *task,
+                                      struct pt_regs *regs)
 {
-        __save_stack_trace(current, trace, 0);
+        struct stackframe frame;
+
+        if (regs)
+                start_backtrace(&frame, regs->regs[29], regs->pc);
+        else if (task == current)
+                start_backtrace(&frame,
                                (unsigned long)__builtin_frame_address(1),
                                (unsigned long)__builtin_return_address(0));
+        else
+                start_backtrace(&frame, thread_saved_fp(task),
                                thread_saved_pc(task));
+
+        walk_stackframe(task, &frame, consume_entry, cookie);
 }
 
-EXPORT_SYMBOL_GPL(save_stack_trace);
 #endif
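
The comments in the diff describe the AArch64 frame record (a {x29, x30} pair chained through the frame pointer) that unwind_frame() walks, along with the new alignment, monotonic-fp and stack-nesting checks. For readers who want to see the mechanism outside the kernel, here is a minimal user-space sketch of the same walk. It is illustrative only: the function names are made up rather than kernel API, and it assumes an AArch64 build with frame pointers enabled (e.g. -fno-omit-frame-pointer).

/*
 * Illustrative user-space sketch of the frame-record walk done by
 * unwind_frame() above. Assumes AArch64 with frame pointers enabled;
 * walk_own_stack() is a hypothetical name, not kernel API.
 */
#include <stdint.h>
#include <stdio.h>

static void walk_own_stack(void)
{
        /* x29 points at a frame record: { previous fp, saved lr } */
        uintptr_t fp = (uintptr_t)__builtin_frame_address(0);
        uintptr_t prev_fp = 0;
        int depth;

        for (depth = 0; depth < 16 && fp; depth++) {
                uintptr_t next_fp, pc;

                /*
                 * Same sanity checks as the kernel: 16-byte alignment and
                 * a strictly higher fp than the previous record.
                 */
                if (fp & 0xf || fp <= prev_fp)
                        break;

                next_fp = ((uintptr_t *)fp)[0]; /* saved x29 */
                pc      = ((uintptr_t *)fp)[1]; /* saved x30 (lr) */

                printf("#%d: pc=%#lx\n", depth, (unsigned long)pc);

                prev_fp = fp;
                fp = next_fp;
        }
}

int main(void)
{
        walk_own_stack();
        return 0;
}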
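The diff also changes the callback contract: walk_stackframe() now takes a bool (*fn)(void *, unsigned long) consumer (the stack_trace_consume_fn shape from <linux/stacktrace.h>), and arch_stack_walk() feeds each unwound PC to that consumer, stopping as soon as it returns false. Below is a hedged sketch of a consumer under that contract; struct pc_record, record_pc() and snapshot_current_stack() are hypothetical names, and in-tree code would normally reach arch_stack_walk() through the generic stack_trace_save*() helpers rather than calling it directly.

#include <linux/sched.h>
#include <linux/stacktrace.h>

/* Hypothetical cookie: a fixed-size buffer of return addresses. */
struct pc_record {
        unsigned long *entries;
        unsigned int len;
        unsigned int max;
};

static bool record_pc(void *cookie, unsigned long pc)
{
        struct pc_record *rec = cookie;

        rec->entries[rec->len++] = pc;

        /* Returning false makes walk_stackframe() stop unwinding. */
        return rec->len < rec->max;
}

static unsigned int snapshot_current_stack(unsigned long *entries,
                                           unsigned int max)
{
        struct pc_record rec = {
                .entries = entries,
                .max = max,
        };

        arch_stack_walk(record_pc, &rec, current, NULL);
        return rec.len;
}

The cookie pointer is handed back to the callback unchanged, so all bookkeeping (buffer position, skip counts, filtering) lives in the caller rather than in the arch unwinder, which is what lets the old stack_trace_data plumbing above be deleted.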