2024-12-19 9370bb92b2d16684ee45cf24e879c93c509162da
kernel/arch/x86/kernel/stacktrace.c
@@ -12,78 +12,31 @@
 #include <asm/stacktrace.h>
 #include <asm/unwind.h>
 
-static int save_stack_address(struct stack_trace *trace, unsigned long addr,
-			      bool nosched)
-{
-	if (nosched && in_sched_functions(addr))
-		return 0;
-
-	if (trace->skip > 0) {
-		trace->skip--;
-		return 0;
-	}
-
-	if (trace->nr_entries >= trace->max_entries)
-		return -1;
-
-	trace->entries[trace->nr_entries++] = addr;
-	return 0;
-}
-
-static void noinline __save_stack_trace(struct stack_trace *trace,
-			struct task_struct *task, struct pt_regs *regs,
-			bool nosched)
+void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
+		     struct task_struct *task, struct pt_regs *regs)
 {
 	struct unwind_state state;
 	unsigned long addr;
 
-	if (regs)
-		save_stack_address(trace, regs->ip, nosched);
+	if (regs && !consume_entry(cookie, regs->ip))
+		return;
 
 	for (unwind_start(&state, task, regs, NULL); !unwind_done(&state);
 	     unwind_next_frame(&state)) {
 		addr = unwind_get_return_address(&state);
-		if (!addr || save_stack_address(trace, addr, nosched))
+		if (!addr || !consume_entry(cookie, addr))
 			break;
 	}
-
-	if (trace->nr_entries < trace->max_entries)
-		trace->entries[trace->nr_entries++] = ULONG_MAX;
 }
 
 /*
- * Save stack-backtrace addresses into a stack_trace buffer.
+ * This function returns an error if it detects any unreliable features of the
+ * stack. Otherwise it guarantees that the stack trace is reliable.
+ *
+ * If the task is not 'current', the caller *must* ensure the task is inactive.
  */
-void save_stack_trace(struct stack_trace *trace)
-{
-	trace->skip++;
-	__save_stack_trace(trace, current, NULL, false);
-}
-EXPORT_SYMBOL_GPL(save_stack_trace);
-
-void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
-{
-	__save_stack_trace(trace, current, regs, false);
-}
-
-void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
-{
-	if (!try_get_task_stack(tsk))
-		return;
-
-	if (tsk == current)
-		trace->skip++;
-	__save_stack_trace(trace, tsk, NULL, true);
-
-	put_task_stack(tsk);
-}
-EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
-
-#ifdef CONFIG_HAVE_RELIABLE_STACKTRACE
-
-static int __always_inline
-__save_stack_trace_reliable(struct stack_trace *trace,
-			    struct task_struct *task)
+int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
+			     void *cookie, struct task_struct *task)
 {
 	struct unwind_state state;
 	struct pt_regs *regs;
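
Note: the replacement hands each address to a caller-supplied callback
instead of filling a struct stack_trace, so the skip/max_entries
bookkeeping and the trailing ULONG_MAX marker disappear from the arch
code. A minimal sketch of a consumer, assuming only the callback shape
visible in this hunk (returning false stops the walk); the names
example_cookie and example_consume_entry are hypothetical, not part of
the patch:

	/* Buffer-filling callback: stop the walk once the buffer is full. */
	struct example_cookie {
		unsigned long	*entries;
		unsigned int	nr;
		unsigned int	max;
	};

	static bool example_consume_entry(void *cookie, unsigned long addr)
	{
		struct example_cookie *c = cookie;

		if (c->nr >= c->max)
			return false;	/* tells arch_stack_walk() to stop */
		c->entries[c->nr++] = addr;
		return true;
	}

	/* e.g.: struct example_cookie c = { .entries = buf, .max = size };
	 *       arch_stack_walk(example_consume_entry, &c, current, NULL);
	 */
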
@@ -97,7 +50,7 @@
 	if (regs) {
 		/* Success path for user tasks */
 		if (user_mode(regs))
-			goto success;
+			return 0;
 
 		/*
 		 * Kernel mode registers on the stack indicate an
@@ -105,7 +58,6 @@
 		 * or a page fault), which can make frame pointers
 		 * unreliable.
 		 */
-
 		if (IS_ENABLED(CONFIG_FRAME_POINTER))
 			return -EINVAL;
 	}
@@ -120,7 +72,7 @@
 		if (!addr)
 			return -EINVAL;
 
-		if (save_stack_address(trace, addr, false))
+		if (!consume_entry(cookie, addr))
 			return -EINVAL;
 	}
 
@@ -128,42 +80,8 @@
 	if (unwind_error(&state))
 		return -EINVAL;
 
-	/* Success path for non-user tasks, i.e. kthreads and idle tasks */
-	if (!(task->flags & (PF_KTHREAD | PF_IDLE)))
-		return -EINVAL;
-
-success:
-	if (trace->nr_entries < trace->max_entries)
-		trace->entries[trace->nr_entries++] = ULONG_MAX;
-
 	return 0;
 }
-
-/*
- * This function returns an error if it detects any unreliable features of the
- * stack. Otherwise it guarantees that the stack trace is reliable.
- *
- * If the task is not 'current', the caller *must* ensure the task is inactive.
- */
-int save_stack_trace_tsk_reliable(struct task_struct *tsk,
-				  struct stack_trace *trace)
-{
-	int ret;
-
-	/*
-	 * If the task doesn't have a stack (e.g., a zombie), the stack is
-	 * "reliably" empty.
-	 */
-	if (!try_get_task_stack(tsk))
-		return 0;
-
-	ret = __save_stack_trace_reliable(trace, tsk);
-
-	put_task_stack(tsk);
-
-	return ret;
-}
-#endif /* CONFIG_HAVE_RELIABLE_STACKTRACE */
 
 /* Userspace stacktrace - based on kernel/trace/trace_sysprof.c */
 
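
Note: the deleted save_stack_trace_tsk_reliable() wrapper was also
responsible for pinning the task stack around the walk; after this patch
that bracketing has to come from whatever generic code calls
arch_stack_walk_reliable(). A sketch reconstructed from the removed
lines (the function name is hypothetical):

	static int example_walk_reliable(struct task_struct *tsk,
					 stack_trace_consume_fn fn,
					 void *cookie)
	{
		int ret;

		/*
		 * If the task doesn't have a stack (e.g., a zombie), the
		 * stack is "reliably" empty.
		 */
		if (!try_get_task_stack(tsk))
			return 0;

		ret = arch_stack_walk_reliable(fn, cookie, tsk);

		put_task_stack(tsk);

		return ret;
	}
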
@@ -173,31 +91,33 @@
 };
 
 static int
-copy_stack_frame(const void __user *fp, struct stack_frame_user *frame)
+copy_stack_frame(const struct stack_frame_user __user *fp,
+		 struct stack_frame_user *frame)
 {
 	int ret;
 
-	if (!access_ok(VERIFY_READ, fp, sizeof(*frame)))
+	if (__range_not_ok(fp, sizeof(*frame), TASK_SIZE))
 		return 0;
 
 	ret = 1;
 	pagefault_disable();
-	if (__copy_from_user_inatomic(frame, fp, sizeof(*frame)))
+	if (__get_user(frame->next_fp, &fp->next_fp) ||
+	    __get_user(frame->ret_addr, &fp->ret_addr))
 		ret = 0;
 	pagefault_enable();
 
 	return ret;
 }
 
-static inline void __save_stack_trace_user(struct stack_trace *trace)
+void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
+			  const struct pt_regs *regs)
 {
-	const struct pt_regs *regs = task_pt_regs(current);
 	const void __user *fp = (const void __user *)regs->bp;
 
-	if (trace->nr_entries < trace->max_entries)
-		trace->entries[trace->nr_entries++] = regs->ip;
+	if (!consume_entry(cookie, regs->ip))
+		return;
 
-	while (trace->nr_entries < trace->max_entries) {
+	while (1) {
 		struct stack_frame_user frame;
 
 		frame.next_fp = NULL;
@@ -206,24 +126,11 @@
 			break;
 		if ((unsigned long)fp < regs->sp)
 			break;
-		if (frame.ret_addr) {
-			trace->entries[trace->nr_entries++] =
-				frame.ret_addr;
-		}
-		if (fp == frame.next_fp)
+		if (!frame.ret_addr)
+			break;
+		if (!consume_entry(cookie, frame.ret_addr))
 			break;
 		fp = frame.next_fp;
 	}
 }
 
-void save_stack_trace_user(struct stack_trace *trace)
-{
-	/*
-	 * Trace user stack if we are not a kernel thread
-	 */
-	if (current->mm) {
-		__save_stack_trace_user(trace);
-	}
-	if (trace->nr_entries < trace->max_entries)
-		trace->entries[trace->nr_entries++] = ULONG_MAX;
-}
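
Note: arch_stack_walk_user() follows the user-space frame-pointer chain,
reading one struct stack_frame_user per frame. For reference, the layout
the __get_user() calls in copy_stack_frame() assume (defined earlier in
this file; the context line "};" opening the previous hunk is its close,
and the patch leaves it untouched):

	struct stack_frame_user {
		const void __user	*next_fp;	/* previous frame pointer */
		unsigned long		ret_addr;	/* return address */
	};

The rewritten loop stops on the first unreadable frame, a frame below
regs->sp, a zero return address, or a callback that returns false; since
consume_entry() now bounds the trace, the old nr_entries/max_entries
guard, the save_stack_trace_user() wrapper and the ULONG_MAX terminator
are all gone.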