```diff
@@ -59,7 +59,7 @@
 {
 	unsigned long sp;
 
-	sp = current_stack_pointer();
+	sp = current_stack_frame();
 
 	save_context_stack(trace, sp, current, 1);
 }
@@ -69,12 +69,17 @@
 {
 	unsigned long sp;
 
+	if (!try_get_task_stack(tsk))
+		return;
+
 	if (tsk == current)
-		sp = current_stack_pointer();
+		sp = current_stack_frame();
 	else
 		sp = tsk->thread.ksp;
 
 	save_context_stack(trace, sp, tsk, 0);
+
+	put_task_stack(tsk);
 }
 EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
 
@@ -86,25 +91,21 @@
 EXPORT_SYMBOL_GPL(save_stack_trace_regs);
 
 #ifdef CONFIG_HAVE_RELIABLE_STACKTRACE
-int
-save_stack_trace_tsk_reliable(struct task_struct *tsk,
-			      struct stack_trace *trace)
+/*
+ * This function returns an error if it detects any unreliable features of the
+ * stack. Otherwise it guarantees that the stack trace is reliable.
+ *
+ * If the task is not 'current', the caller *must* ensure the task is inactive.
+ */
+static int __save_stack_trace_tsk_reliable(struct task_struct *tsk,
+					   struct stack_trace *trace)
 {
 	unsigned long sp;
+	unsigned long newsp;
 	unsigned long stack_page = (unsigned long)task_stack_page(tsk);
 	unsigned long stack_end;
 	int graph_idx = 0;
-
-	/*
-	 * The last frame (unwinding first) may not yet have saved
-	 * its LR onto the stack.
-	 */
-	int firstframe = 1;
-
-	if (tsk == current)
-		sp = current_stack_pointer();
-	else
-		sp = tsk->thread.ksp;
+	bool firstframe;
 
 	stack_end = stack_page + THREAD_SIZE;
 	if (!is_idle_task(tsk)) {
@@ -131,71 +132,96 @@
 		stack_end -= STACK_FRAME_OVERHEAD;
 	}
 
+	if (tsk == current)
+		sp = current_stack_frame();
+	else
+		sp = tsk->thread.ksp;
+
 	if (sp < stack_page + sizeof(struct thread_struct) ||
 	    sp > stack_end - STACK_FRAME_MIN_SIZE) {
-		return 1;
+		return -EINVAL;
 	}
 
-	for (;;) {
+	for (firstframe = true; sp != stack_end;
+	     firstframe = false, sp = newsp) {
 		unsigned long *stack = (unsigned long *) sp;
-		unsigned long newsp, ip;
+		unsigned long ip;
 
 		/* sanity check: ABI requires SP to be aligned 16 bytes. */
 		if (sp & 0xF)
-			return 1;
-
-		/* Mark stacktraces with exception frames as unreliable. */
-		if (sp <= stack_end - STACK_INT_FRAME_SIZE &&
-		    stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
-			return 1;
-		}
+			return -EINVAL;
 
 		newsp = stack[0];
 		/* Stack grows downwards; unwinder may only go up. */
 		if (newsp <= sp)
-			return 1;
+			return -EINVAL;
 
 		if (newsp != stack_end &&
 		    newsp > stack_end - STACK_FRAME_MIN_SIZE) {
-			return 1; /* invalid backlink, too far up. */
+			return -EINVAL; /* invalid backlink, too far up. */
+		}
+
+		/*
+		 * We can only trust the bottom frame's backlink, the
+		 * rest of the frame may be uninitialized, continue to
+		 * the next.
+		 */
+		if (firstframe)
+			continue;
+
+		/* Mark stacktraces with exception frames as unreliable. */
+		if (sp <= stack_end - STACK_INT_FRAME_SIZE &&
+		    stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
+			return -EINVAL;
 		}
 
 		/* Examine the saved LR: it must point into kernel code. */
 		ip = stack[STACK_FRAME_LR_SAVE];
-		if (!firstframe && !__kernel_text_address(ip))
-			return 1;
-		firstframe = 0;
+		if (!__kernel_text_address(ip))
+			return -EINVAL;
 
 		/*
 		 * FIXME: IMHO these tests do not belong in
 		 * arch-dependent code, they are generic.
 		 */
-		ip = ftrace_graph_ret_addr(tsk, &graph_idx, ip, NULL);
+		ip = ftrace_graph_ret_addr(tsk, &graph_idx, ip, stack);
 #ifdef CONFIG_KPROBES
 		/*
 		 * Mark stacktraces with kretprobed functions on them
 		 * as unreliable.
 		 */
 		if (ip == (unsigned long)kretprobe_trampoline)
-			return 1;
+			return -EINVAL;
 #endif
 
+		if (trace->nr_entries >= trace->max_entries)
+			return -E2BIG;
 		if (!trace->skip)
 			trace->entries[trace->nr_entries++] = ip;
 		else
 			trace->skip--;
-
-		if (newsp == stack_end)
-			break;
-
-		if (trace->nr_entries >= trace->max_entries)
-			return -E2BIG;
-
-		sp = newsp;
 	}
 	return 0;
 }
-EXPORT_SYMBOL_GPL(save_stack_trace_tsk_reliable);
+
+int save_stack_trace_tsk_reliable(struct task_struct *tsk,
+				  struct stack_trace *trace)
+{
+	int ret;
+
+	/*
+	 * If the task doesn't have a stack (e.g., a zombie), the stack is
+	 * "reliably" empty.
+	 */
+	if (!try_get_task_stack(tsk))
+		return 0;
+
+	ret = __save_stack_trace_tsk_reliable(tsk, trace);
+
+	put_task_stack(tsk);
+
+	return ret;
+}
 #endif /* CONFIG_HAVE_RELIABLE_STACKTRACE */
 
 #if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_NMI_IPI)
@@ -250,7 +276,7 @@
 			pr_cont(" current pointer corrupt? (%px)\n", p->__current);
 
 		pr_warn("Back trace of paca->saved_r1 (0x%016llx) (possibly stale):\n", p->saved_r1);
-		show_stack(p->__current, (unsigned long *)p->saved_r1);
+		show_stack(p->__current, (unsigned long *)p->saved_r1, KERN_WARNING);
 	}
 }
 
```