2024-10-22 8ac6c7a54ed1b98d142dce24b11c6de6a1e239a5
kernel/arch/arm64/kernel/stacktrace.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Stack tracing support
  *
  * Copyright (C) 2012 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
 #include <linux/kernel.h>
 #include <linux/export.h>
@@ -25,6 +14,7 @@
 #include <linux/stacktrace.h>
 
 #include <asm/irq.h>
+#include <asm/pointer_auth.h>
 #include <asm/stack_pointer.h>
 #include <asm/stacktrace.h>
 
@@ -41,9 +31,18 @@
  *	ldp	x29, x30, [sp]
  *	add	sp, sp, #0x10
  */
+
+/*
+ * Unwind from one frame record (A) to the next frame record (B).
+ *
+ * We terminate early if the location of B indicates a malformed chain of frame
+ * records (e.g. a cycle), determined based on the location and fp value of A
+ * and the location (but not the fp value) of B.
+ */
 int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
 {
 	unsigned long fp = frame->fp;
+	struct stack_info info;
 
 	if (fp & 0xf)
 		return -EINVAL;
@@ -51,29 +50,59 @@
 	if (!tsk)
 		tsk = current;
 
-	if (!on_accessible_stack(tsk, fp, NULL))
+	if (!on_accessible_stack(tsk, fp, &info))
 		return -EINVAL;
 
+	if (test_bit(info.type, frame->stacks_done))
+		return -EINVAL;
+
+	/*
+	 * As stacks grow downward, any valid record on the same stack must be
+	 * at a strictly higher address than the prior record.
+	 *
+	 * Stacks can nest in several valid orders, e.g.
+	 *
+	 * TASK -> IRQ -> OVERFLOW -> SDEI_NORMAL
+	 * TASK -> SDEI_NORMAL -> SDEI_CRITICAL -> OVERFLOW
+	 *
+	 * ... but the nesting itself is strict. Once we transition from one
+	 * stack to another, it's never valid to unwind back to that first
+	 * stack.
+	 */
+	if (info.type == frame->prev_type) {
+		if (fp <= frame->prev_fp)
+			return -EINVAL;
+	} else {
+		set_bit(frame->prev_type, frame->stacks_done);
+	}
+
+	/*
+	 * Record this frame record's values and location. The prev_fp and
+	 * prev_type are only meaningful to the next unwind_frame() invocation.
+	 */
 	frame->fp = READ_ONCE_NOCHECK(*(unsigned long *)(fp));
 	frame->pc = READ_ONCE_NOCHECK(*(unsigned long *)(fp + 8));
+	frame->prev_fp = fp;
+	frame->prev_type = info.type;
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 	if (tsk->ret_stack &&
-		(frame->pc == (unsigned long)return_to_handler)) {
-		if (WARN_ON_ONCE(frame->graph == -1))
-			return -EINVAL;
-		if (frame->graph < -1)
-			frame->graph += FTRACE_NOTRACE_DEPTH;
-
+	    (ptrauth_strip_insn_pac(frame->pc) == (unsigned long)return_to_handler)) {
+		struct ftrace_ret_stack *ret_stack;
 		/*
 		 * This is a case where function graph tracer has
 		 * modified a return address (LR) in a stack frame
 		 * to hook a function return.
 		 * So replace it to an original value.
		 */
-		frame->pc = tsk->ret_stack[frame->graph--].ret;
+		ret_stack = ftrace_graph_get_ret_stack(tsk, frame->graph++);
+		if (WARN_ON_ONCE(!ret_stack))
+			return -EINVAL;
+		frame->pc = ret_stack->ret;
 	}
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
+	frame->pc = ptrauth_strip_insn_pac(frame->pc);
 
 	/*
	 * Frames created upon entry from EL0 have NULL FP and PC values, so
@@ -89,12 +118,12 @@
 NOKPROBE_SYMBOL(unwind_frame);
 
 void notrace walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
-		     int (*fn)(struct stackframe *, void *), void *data)
+			     bool (*fn)(void *, unsigned long), void *data)
 {
 	while (1) {
 		int ret;
 
-		if (fn(frame, data))
+		if (!fn(data, frame->pc))
 			break;
 		ret = unwind_frame(tsk, frame);
 		if (ret < 0)
@@ -103,94 +132,91 @@
 }
 NOKPROBE_SYMBOL(walk_stackframe);
 
-#ifdef CONFIG_STACKTRACE
-struct stack_trace_data {
-	struct stack_trace *trace;
-	unsigned int no_sched_functions;
-	unsigned int skip;
-};
-
-static int save_trace(struct stackframe *frame, void *d)
+static void dump_backtrace_entry(unsigned long where, const char *loglvl)
 {
-	struct stack_trace_data *data = d;
-	struct stack_trace *trace = data->trace;
-	unsigned long addr = frame->pc;
+	printk("%s %pS\n", loglvl, (void *)where);
+}
 
-	if (data->no_sched_functions && in_sched_functions(addr))
-		return 0;
-	if (data->skip) {
-		data->skip--;
-		return 0;
+void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
+		    const char *loglvl)
+{
+	struct stackframe frame;
+	int skip = 0;
+
+	pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);
+
+	if (regs) {
+		if (user_mode(regs))
+			return;
+		skip = 1;
 	}
 
-	trace->entries[trace->nr_entries++] = addr;
-
-	return trace->nr_entries >= trace->max_entries;
-}
-
-void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
-{
-	struct stack_trace_data data;
-	struct stackframe frame;
-
-	data.trace = trace;
-	data.skip = trace->skip;
-	data.no_sched_functions = 0;
-
-	frame.fp = regs->regs[29];
-	frame.pc = regs->pc;
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-	frame.graph = current->curr_ret_stack;
-#endif
-
-	walk_stackframe(current, &frame, save_trace, &data);
-	if (trace->nr_entries < trace->max_entries)
-		trace->entries[trace->nr_entries++] = ULONG_MAX;
-}
-
-static noinline void __save_stack_trace(struct task_struct *tsk,
-	struct stack_trace *trace, unsigned int nosched)
-{
-	struct stack_trace_data data;
-	struct stackframe frame;
+	if (!tsk)
+		tsk = current;
 
 	if (!try_get_task_stack(tsk))
 		return;
 
-	data.trace = trace;
-	data.skip = trace->skip;
-	data.no_sched_functions = nosched;
-
-	if (tsk != current) {
-		frame.fp = thread_saved_fp(tsk);
-		frame.pc = thread_saved_pc(tsk);
+	if (tsk == current) {
+		start_backtrace(&frame,
+				(unsigned long)__builtin_frame_address(0),
+				(unsigned long)dump_backtrace);
 	} else {
-		/* We don't want this function nor the caller */
-		data.skip += 2;
-		frame.fp = (unsigned long)__builtin_frame_address(0);
-		frame.pc = (unsigned long)__save_stack_trace;
+		/*
+		 * task blocked in __switch_to
+		 */
+		start_backtrace(&frame,
+				thread_saved_fp(tsk),
+				thread_saved_pc(tsk));
 	}
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-	frame.graph = tsk->curr_ret_stack;
-#endif
 
-	walk_stackframe(tsk, &frame, save_trace, &data);
-	if (trace->nr_entries < trace->max_entries)
-		trace->entries[trace->nr_entries++] = ULONG_MAX;
+	printk("%sCall trace:\n", loglvl);
+	do {
+		/* skip until specified stack frame */
+		if (!skip) {
+			dump_backtrace_entry(frame.pc, loglvl);
+		} else if (frame.fp == regs->regs[29]) {
+			skip = 0;
+			/*
+			 * Mostly, this is the case where this function is
+			 * called in panic/abort. As exception handler's
+			 * stack frame does not contain the corresponding pc
+			 * at which an exception has taken place, use regs->pc
+			 * instead.
+			 */
+			dump_backtrace_entry(regs->pc, loglvl);
+		}
+	} while (!unwind_frame(tsk, &frame));
 
 	put_task_stack(tsk);
 }
-EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
+EXPORT_SYMBOL_GPL(dump_backtrace);
 
-void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
+void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
 {
-	__save_stack_trace(tsk, trace, 1);
+	dump_backtrace(NULL, tsk, loglvl);
+	barrier();
 }
 
-void save_stack_trace(struct stack_trace *trace)
+#ifdef CONFIG_STACKTRACE
+
+noinline notrace void arch_stack_walk(stack_trace_consume_fn consume_entry,
+			      void *cookie, struct task_struct *task,
+			      struct pt_regs *regs)
 {
-	__save_stack_trace(current, trace, 0);
+	struct stackframe frame;
+
+	if (regs)
+		start_backtrace(&frame, regs->regs[29], regs->pc);
+	else if (task == current)
+		start_backtrace(&frame,
+				(unsigned long)__builtin_frame_address(1),
+				(unsigned long)__builtin_return_address(0));
+	else
+		start_backtrace(&frame, thread_saved_fp(task),
+				thread_saved_pc(task));
+
+	walk_stackframe(task, &frame, consume_entry, cookie);
 }
 
-EXPORT_SYMBOL_GPL(save_stack_trace);
 #endif
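
Note: the hunks above change the walk_stackframe() callback contract from int (*fn)(struct stackframe *, void *) to bool (*fn)(void *, unsigned long), where the walk stops as soon as the callback returns false ("if (!fn(data, frame->pc)) break;"). The sketch below shows what a consumer of that contract could look like, using only start_backtrace() and walk_stackframe() as they appear in this diff. The names pc_buffer, collect_entry and collect_current_pcs are hypothetical and exist only for illustration; this is not code from the patch.

/*
 * Minimal, illustrative consumer of the new callback contract.
 * Assumes the struct stackframe and start_backtrace()/walk_stackframe()
 * interfaces shown in the diff above.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <asm/stacktrace.h>

struct pc_buffer {
	unsigned long entries[16];
	unsigned int nr;
};

static bool collect_entry(void *cookie, unsigned long pc)
{
	struct pc_buffer *buf = cookie;

	if (buf->nr >= ARRAY_SIZE(buf->entries))
		return false;	/* buffer full: stop the walk */
	buf->entries[buf->nr++] = pc;
	return true;		/* keep unwinding */
}

static void collect_current_pcs(struct pc_buffer *buf)
{
	struct stackframe frame;

	buf->nr = 0;
	/* Start from this function's own frame record, as dump_backtrace() does. */
	start_backtrace(&frame,
			(unsigned long)__builtin_frame_address(0),
			(unsigned long)collect_current_pcs);
	walk_stackframe(current, &frame, collect_entry, buf);
}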