.. | .. |
---|
21 | 21 | #include <asm/debug.h> |
---|
22 | 22 | #include <asm/dis.h> |
---|
23 | 23 | #include <asm/ipl.h> |
---|
| 24 | +#include <asm/unwind.h> |
---|
24 | 25 | |
---|
25 | | -/* |
---|
26 | | - * For dump_trace we have tree different stack to consider: |
---|
27 | | - * - the panic stack which is used if the kernel stack has overflown |
---|
28 | | - * - the asynchronous interrupt stack (cpu related) |
---|
29 | | - * - the synchronous kernel stack (process related) |
---|
30 | | - * The stack trace can start at any of the three stacks and can potentially |
---|
31 | | - * touch all of them. The order is: panic stack, async stack, sync stack. |
---|
32 | | - */ |
---|
33 | | -static unsigned long |
---|
34 | | -__dump_trace(dump_trace_func_t func, void *data, unsigned long sp, |
---|
35 | | - unsigned long low, unsigned long high) |
---|
| 26 | +const char *stack_type_name(enum stack_type type) |
---|
36 | 27 | { |
---|
37 | | - struct stack_frame *sf; |
---|
38 | | - struct pt_regs *regs; |
---|
39 | | - |
---|
40 | | - while (1) { |
---|
41 | | - if (sp < low || sp > high - sizeof(*sf)) |
---|
42 | | - return sp; |
---|
43 | | - sf = (struct stack_frame *) sp; |
---|
44 | | - if (func(data, sf->gprs[8], 0)) |
---|
45 | | - return sp; |
---|
46 | | - /* Follow the backchain. */ |
---|
47 | | - while (1) { |
---|
48 | | - low = sp; |
---|
49 | | - sp = sf->back_chain; |
---|
50 | | - if (!sp) |
---|
51 | | - break; |
---|
52 | | - if (sp <= low || sp > high - sizeof(*sf)) |
---|
53 | | - return sp; |
---|
54 | | - sf = (struct stack_frame *) sp; |
---|
55 | | - if (func(data, sf->gprs[8], 1)) |
---|
56 | | - return sp; |
---|
57 | | - } |
---|
58 | | - /* Zero backchain detected, check for interrupt frame. */ |
---|
59 | | - sp = (unsigned long) (sf + 1); |
---|
60 | | - if (sp <= low || sp > high - sizeof(*regs)) |
---|
61 | | - return sp; |
---|
62 | | - regs = (struct pt_regs *) sp; |
---|
63 | | - if (!user_mode(regs)) { |
---|
64 | | - if (func(data, regs->psw.addr, 1)) |
---|
65 | | - return sp; |
---|
66 | | - } |
---|
67 | | - low = sp; |
---|
68 | | - sp = regs->gprs[15]; |
---|
| 28 | + switch (type) { |
---|
| 29 | + case STACK_TYPE_TASK: |
---|
| 30 | + return "task"; |
---|
| 31 | + case STACK_TYPE_IRQ: |
---|
| 32 | + return "irq"; |
---|
| 33 | + case STACK_TYPE_NODAT: |
---|
| 34 | + return "nodat"; |
---|
| 35 | + case STACK_TYPE_RESTART: |
---|
| 36 | + return "restart"; |
---|
| 37 | + default: |
---|
| 38 | + return "unknown"; |
---|
69 | 39 | } |
---|
70 | 40 | } |
---|
| 41 | +EXPORT_SYMBOL_GPL(stack_type_name); |
---|
71 | 42 | |
---|
72 | | -void dump_trace(dump_trace_func_t func, void *data, struct task_struct *task, |
---|
73 | | - unsigned long sp) |
---|
| 43 | +static inline bool in_stack(unsigned long sp, struct stack_info *info, |
---|
| 44 | + enum stack_type type, unsigned long low, |
---|
| 45 | + unsigned long high) |
---|
74 | 46 | { |
---|
75 | | - unsigned long frame_size; |
---|
| 47 | + if (sp < low || sp >= high) |
---|
| 48 | + return false; |
---|
| 49 | + info->type = type; |
---|
| 50 | + info->begin = low; |
---|
| 51 | + info->end = high; |
---|
| 52 | + return true; |
---|
| 53 | +} |
---|
| 54 | + |
---|
| 55 | +static bool in_task_stack(unsigned long sp, struct task_struct *task, |
---|
| 56 | + struct stack_info *info) |
---|
| 57 | +{ |
---|
| 58 | + unsigned long stack; |
---|
| 59 | + |
---|
| 60 | + stack = (unsigned long) task_stack_page(task); |
---|
| 61 | + return in_stack(sp, info, STACK_TYPE_TASK, stack, stack + THREAD_SIZE); |
---|
| 62 | +} |
---|
| 63 | + |
---|
| 64 | +static bool in_irq_stack(unsigned long sp, struct stack_info *info) |
---|
| 65 | +{ |
---|
| 66 | + unsigned long frame_size, top; |
---|
76 | 67 | |
---|
77 | 68 | frame_size = STACK_FRAME_OVERHEAD + sizeof(struct pt_regs); |
---|
78 | | -#ifdef CONFIG_CHECK_STACK |
---|
79 | | - sp = __dump_trace(func, data, sp, |
---|
80 | | - S390_lowcore.panic_stack + frame_size - PAGE_SIZE, |
---|
81 | | - S390_lowcore.panic_stack + frame_size); |
---|
82 | | -#endif |
---|
83 | | - sp = __dump_trace(func, data, sp, |
---|
84 | | - S390_lowcore.async_stack + frame_size - ASYNC_SIZE, |
---|
85 | | - S390_lowcore.async_stack + frame_size); |
---|
86 | | - task = task ?: current; |
---|
87 | | - __dump_trace(func, data, sp, |
---|
88 | | - (unsigned long)task_stack_page(task), |
---|
89 | | - (unsigned long)task_stack_page(task) + THREAD_SIZE); |
---|
90 | | -} |
---|
91 | | -EXPORT_SYMBOL_GPL(dump_trace); |
---|
92 | | - |
---|
93 | | -static int show_address(void *data, unsigned long address, int reliable) |
---|
94 | | -{ |
---|
95 | | - if (reliable) |
---|
96 | | - printk(" [<%016lx>] %pSR \n", address, (void *)address); |
---|
97 | | - else |
---|
98 | | - printk("([<%016lx>] %pSR)\n", address, (void *)address); |
---|
99 | | - return 0; |
---|
| 69 | + top = S390_lowcore.async_stack + frame_size; |
---|
| 70 | + return in_stack(sp, info, STACK_TYPE_IRQ, top - THREAD_SIZE, top); |
---|
100 | 71 | } |
---|
101 | 72 | |
---|
102 | | -void show_stack(struct task_struct *task, unsigned long *stack) |
---|
| 73 | +static bool in_nodat_stack(unsigned long sp, struct stack_info *info) |
---|
103 | 74 | { |
---|
104 | | - unsigned long sp = (unsigned long) stack; |
---|
| 75 | + unsigned long frame_size, top; |
---|
105 | 76 | |
---|
| 77 | + frame_size = STACK_FRAME_OVERHEAD + sizeof(struct pt_regs); |
---|
| 78 | + top = S390_lowcore.nodat_stack + frame_size; |
---|
| 79 | + return in_stack(sp, info, STACK_TYPE_NODAT, top - THREAD_SIZE, top); |
---|
| 80 | +} |
---|
| 81 | + |
---|
| 82 | +static bool in_restart_stack(unsigned long sp, struct stack_info *info) |
---|
| 83 | +{ |
---|
| 84 | + unsigned long frame_size, top; |
---|
| 85 | + |
---|
| 86 | + frame_size = STACK_FRAME_OVERHEAD + sizeof(struct pt_regs); |
---|
| 87 | + top = S390_lowcore.restart_stack + frame_size; |
---|
| 88 | + return in_stack(sp, info, STACK_TYPE_RESTART, top - THREAD_SIZE, top); |
---|
| 89 | +} |
---|
| 90 | + |
---|
/*
 * get_stack_info - classify the stack that @sp points into.
 * @sp:         stack pointer value to classify (0 means "no pointer")
 * @task:       task whose stacks are candidates
 * @info:       filled in with the matched stack's type and bounds
 * @visit_mask: bitmask (one bit per stack type) of stacks already
 *              visited during this unwind; updated on success
 *
 * Returns 0 with *info filled in on success. On failure returns
 * -EINVAL and sets info->type to STACK_TYPE_UNKNOWN.
 */
int get_stack_info(unsigned long sp, struct task_struct *task,
		   struct stack_info *info, unsigned long *visit_mask)
{
	if (!sp)
		goto unknown;

	/* Sanity check: ABI requires SP to be aligned 8 bytes. */
	if (sp & 0x7)
		goto unknown;

	/* Check per-task stack */
	if (in_task_stack(sp, task, info))
		goto recursion_check;

	/*
	 * The per-cpu stacks below belong to the CPU currently running;
	 * they are only meaningful candidates when unwinding the current
	 * task, so give up here for any other task.
	 */
	if (task != current)
		goto unknown;

	/* Check per-cpu stacks */
	if (!in_irq_stack(sp, info) &&
	    !in_nodat_stack(sp, info) &&
	    !in_restart_stack(sp, info))
		goto unknown;

recursion_check:
	/*
	 * Make sure we don't iterate through any given stack more than once.
	 * If it comes up a second time then there's something wrong going on:
	 * just break out and report an unknown stack type.
	 */
	if (*visit_mask & (1UL << info->type))
		goto unknown;
	*visit_mask |= 1UL << info->type;
	return 0;
unknown:
	info->type = STACK_TYPE_UNKNOWN;
	return -EINVAL;
}
---|
| 128 | + |
---|
| 129 | +void show_stack(struct task_struct *task, unsigned long *stack, |
---|
| 130 | + const char *loglvl) |
---|
| 131 | +{ |
---|
| 132 | + struct unwind_state state; |
---|
| 133 | + |
---|
| 134 | + printk("%sCall Trace:\n", loglvl); |
---|
| 135 | + unwind_for_each_frame(&state, task, NULL, (unsigned long) stack) |
---|
| 136 | + printk(state.reliable ? "%s [<%016lx>] %pSR \n" : |
---|
| 137 | + "%s([<%016lx>] %pSR)\n", |
---|
| 138 | + loglvl, state.ip, (void *) state.ip); |
---|
| 139 | + debug_show_held_locks(task ? : current); |
---|
113 | 140 | } |
---|
114 | 141 | |
---|
115 | 142 | static void show_last_breaking_event(struct pt_regs *regs) |
---|
.. | .. |
---|
124 | 151 | char *mode; |
---|
125 | 152 | |
---|
126 | 153 | mode = user_mode(regs) ? "User" : "Krnl"; |
---|
127 | | - printk("%s PSW : %p %p", mode, (void *)regs->psw.mask, (void *)regs->psw.addr); |
---|
| 154 | + printk("%s PSW : %px %px", mode, (void *)regs->psw.mask, (void *)regs->psw.addr); |
---|
128 | 155 | if (!user_mode(regs)) |
---|
129 | 156 | pr_cont(" (%pSR)", (void *)regs->psw.addr); |
---|
130 | 157 | pr_cont("\n"); |
---|
.. | .. |
---|
149 | 176 | show_registers(regs); |
---|
150 | 177 | /* Show stack backtrace if pt_regs is from kernel mode */ |
---|
151 | 178 | if (!user_mode(regs)) |
---|
152 | | - show_stack(NULL, (unsigned long *) regs->gprs[15]); |
---|
| 179 | + show_stack(NULL, (unsigned long *) regs->gprs[15], KERN_DEFAULT); |
---|
153 | 180 | show_last_breaking_event(regs); |
---|
154 | 181 | } |
---|
155 | 182 | |
---|
.. | .. |
---|
169 | 196 | regs->int_code >> 17, ++die_counter); |
---|
170 | 197 | #ifdef CONFIG_PREEMPT |
---|
171 | 198 | pr_cont("PREEMPT "); |
---|
| 199 | +#elif defined(CONFIG_PREEMPT_RT) |
---|
| 200 | + pr_cont("PREEMPT_RT "); |
---|
172 | 201 | #endif |
---|
173 | | -#ifdef CONFIG_SMP |
---|
174 | 202 | pr_cont("SMP "); |
---|
175 | | -#endif |
---|
176 | 203 | if (debug_pagealloc_enabled()) |
---|
177 | 204 | pr_cont("DEBUG_PAGEALLOC"); |
---|
178 | 205 | pr_cont("\n"); |
---|
.. | .. |
---|
187 | 214 | if (panic_on_oops) |
---|
188 | 215 | panic("Fatal exception: panic_on_oops"); |
---|
189 | 216 | oops_exit(); |
---|
190 | | - do_exit(SIGSEGV); |
---|
| 217 | + make_task_dead(SIGSEGV); |
---|
191 | 218 | } |
---|