.. | ..
| 1 | +// SPDX-License-Identifier: GPL-2.0-only
1 | 2 | /*
2 | 3 | * Based on arch/arm/kernel/traps.c
3 | 4 | *
4 | 5 | * Copyright (C) 1995-2009 Russell King
5 | 6 | * Copyright (C) 2012 ARM Ltd.
6 | | - *
7 | | - * This program is free software; you can redistribute it and/or modify
8 | | - * it under the terms of the GNU General Public License version 2 as
9 | | - * published by the Free Software Foundation.
10 | | - *
11 | | - * This program is distributed in the hope that it will be useful,
12 | | - * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 | | - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 | | - * GNU General Public License for more details.
15 | | - *
16 | | - * You should have received a copy of the GNU General Public License
17 | | - * along with this program. If not, see <http://www.gnu.org/licenses/>.
18 | 7 | */
19 | 8 |
20 | 9 | #include <linux/bug.h>
| 10 | +#include <linux/context_tracking.h>
21 | 11 | #include <linux/signal.h>
22 | 12 | #include <linux/personality.h>
23 | 13 | #include <linux/kallsyms.h>
| 14 | +#include <linux/kprobes.h>
24 | 15 | #include <linux/spinlock.h>
25 | 16 | #include <linux/uaccess.h>
26 | 17 | #include <linux/hardirq.h>
.. | ..
43 | 34 | #include <asm/daifflags.h>
44 | 35 | #include <asm/debug-monitors.h>
45 | 36 | #include <asm/esr.h>
| 37 | +#include <asm/exception.h>
| 38 | +#include <asm/extable.h>
46 | 39 | #include <asm/insn.h>
| 40 | +#include <asm/kprobes.h>
47 | 41 | #include <asm/traps.h>
48 | 42 | #include <asm/smp.h>
49 | 43 | #include <asm/stack_pointer.h>
.. | ..
51 | 45 | #include <asm/exception.h>
52 | 46 | #include <asm/system_misc.h>
53 | 47 | #include <asm/sysreg.h>
| 48 | +
| 49 | +#include <trace/hooks/traps.h>
| 50 | +
| 51 | +#if IS_ENABLED(CONFIG_ROCKCHIP_MINIDUMP)
| 52 | +#include <soc/rockchip/rk_minidump.h>
| 53 | +#endif
54 | 54 |
55 | 55 | static const char *handler[]= {
56 | 56 | "Synchronous Abort",
.. | ..
61 | 61 |
62 | 62 | int show_unhandled_signals = 0;
63 | 63 |
64 | | -static void dump_backtrace_entry(unsigned long where)
65 | | -{
66 | | - printk(" %pS\n", (void *)where);
67 | | -}
68 | | -
69 | | -static void __dump_instr(const char *lvl, struct pt_regs *regs)
| 64 | +static void dump_kernel_instr(const char *lvl, struct pt_regs *regs)
70 | 65 | {
71 | 66 | unsigned long addr = instruction_pointer(regs);
72 | 67 | char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str;
73 | 68 | int i;
74 | 69 |
| 70 | + if (user_mode(regs))
| 71 | + return;
| 72 | +
75 | 73 | for (i = -4; i < 1; i++) {
76 | 74 | unsigned int val, bad;
77 | 75 |
78 | | - bad = get_user(val, &((u32 *)addr)[i]);
| 76 | + bad = aarch64_insn_read(&((u32 *)addr)[i], &val);
79 | 77 |
80 | 78 | if (!bad)
81 | 79 | p += sprintf(p, i == 0 ? "(%08x) " : "%08x ", val);
.. | ..
84 | 82 | break;
85 | 83 | }
86 | 84 | }
| 85 | +
87 | 86 | printk("%sCode: %s\n", lvl, str);
88 | | -}
89 | | -
90 | | -static void dump_instr(const char *lvl, struct pt_regs *regs)
91 | | -{
92 | | - if (!user_mode(regs)) {
93 | | - mm_segment_t fs = get_fs();
94 | | - set_fs(KERNEL_DS);
95 | | - __dump_instr(lvl, regs);
96 | | - set_fs(fs);
97 | | - } else {
98 | | - __dump_instr(lvl, regs);
99 | | - }
100 | | -}
101 | | -
102 | | -void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
103 | | -{
104 | | - struct stackframe frame;
105 | | - int skip = 0;
106 | | -
107 | | - pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);
108 | | -
109 | | - if (regs) {
110 | | - if (user_mode(regs))
111 | | - return;
112 | | - skip = 1;
113 | | - }
114 | | -
115 | | - if (!tsk)
116 | | - tsk = current;
117 | | -
118 | | - if (!try_get_task_stack(tsk))
119 | | - return;
120 | | -
121 | | - if (tsk == current) {
122 | | - frame.fp = (unsigned long)__builtin_frame_address(0);
123 | | - frame.pc = (unsigned long)dump_backtrace;
124 | | - } else {
125 | | - /*
126 | | - * task blocked in __switch_to
127 | | - */
128 | | - frame.fp = thread_saved_fp(tsk);
129 | | - frame.pc = thread_saved_pc(tsk);
130 | | - }
131 | | -#ifdef CONFIG_FUNCTION_GRAPH_TRACER
132 | | - frame.graph = tsk->curr_ret_stack;
133 | | -#endif
134 | | -
135 | | - printk("Call trace:\n");
136 | | - do {
137 | | - /* skip until specified stack frame */
138 | | - if (!skip) {
139 | | - dump_backtrace_entry(frame.pc);
140 | | - } else if (frame.fp == regs->regs[29]) {
141 | | - skip = 0;
142 | | - /*
143 | | - * Mostly, this is the case where this function is
144 | | - * called in panic/abort. As exception handler's
145 | | - * stack frame does not contain the corresponding pc
146 | | - * at which an exception has taken place, use regs->pc
147 | | - * instead.
148 | | - */
149 | | - dump_backtrace_entry(regs->pc);
150 | | - }
151 | | - } while (!unwind_frame(tsk, &frame));
152 | | -
153 | | - put_task_stack(tsk);
154 | | -}
155 | | -
156 | | -void show_stack(struct task_struct *tsk, unsigned long *sp)
157 | | -{
158 | | - dump_backtrace(NULL, tsk);
159 | | - barrier();
160 | 87 | }
161 | 88 |
162 | 89 | #ifdef CONFIG_PREEMPT
163 | 90 | #define S_PREEMPT " PREEMPT"
| 91 | +#elif defined(CONFIG_PREEMPT_RT)
| 92 | +#define S_PREEMPT " PREEMPT_RT"
164 | 93 | #else
165 | 94 | #define S_PREEMPT ""
166 | 95 | #endif
| 96 | +
167 | 97 | #define S_SMP " SMP"
168 | 98 |
169 | 99 | static int __die(const char *str, int err, struct pt_regs *regs)
170 | 100 | {
171 | | - struct task_struct *tsk = current;
172 | 101 | static int die_counter;
173 | 102 | int ret;
174 | 103 |
.. | ..
181 | 110 | return ret;
182 | 111 |
183 | 112 | print_modules();
184 | | - pr_emerg("Process %.*s (pid: %d, stack limit = 0x%p)\n",
185 | | - TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk),
186 | | - end_of_stack(tsk));
187 | 113 | show_regs(regs);
188 | 114 |
189 | | - if (!user_mode(regs))
190 | | - dump_instr(KERN_EMERG, regs);
| 115 | + dump_kernel_instr(KERN_EMERG, regs);
191 | 116 |
192 | 117 | return ret;
193 | 118 | }
.. | ..
202 | 127 | int ret;
203 | 128 | unsigned long flags;
204 | 129 |
| 130 | +#if IS_ENABLED(CONFIG_ROCKCHIP_MINIDUMP)
| 131 | + rk_minidump_update_cpu_regs(regs);
| 132 | +#endif
205 | 133 | raw_spin_lock_irqsave(&die_lock, flags);
206 | 134 |
207 | 135 | oops_enter();
.. | ..
218 | 146 | oops_exit();
219 | 147 |
220 | 148 | if (in_interrupt())
221 | | - panic("Fatal exception in interrupt");
| 149 | + panic("%s: Fatal exception in interrupt", str);
222 | 150 | if (panic_on_oops)
223 | | - panic("Fatal exception");
| 151 | + panic("%s: Fatal exception", str);
224 | 152 |
225 | 153 | raw_spin_unlock_irqrestore(&die_lock, flags);
226 | 154 |
227 | 155 | if (ret != NOTIFY_STOP)
228 | | - do_exit(SIGSEGV);
| 156 | + make_task_dead(SIGSEGV);
229 | 157 | }
230 | 158 |
231 | | -static bool show_unhandled_signals_ratelimited(void)
| 159 | +static void arm64_show_signal(int signo, const char *str)
232 | 160 | {
233 | 161 | static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
234 | 162 | DEFAULT_RATELIMIT_BURST);
235 | | - return show_unhandled_signals && __ratelimit(&rs);
236 | | -}
237 | | -
238 | | -void arm64_force_sig_info(struct siginfo *info, const char *str,
239 | | - struct task_struct *tsk)
240 | | -{
| 163 | + struct task_struct *tsk = current;
241 | 164 | unsigned int esr = tsk->thread.fault_code;
242 | 165 | struct pt_regs *regs = task_pt_regs(tsk);
243 | 166 |
244 | | - if (!unhandled_signal(tsk, info->si_signo))
245 | | - goto send_sig;
246 | | -
247 | | - if (!show_unhandled_signals_ratelimited())
248 | | - goto send_sig;
| 167 | + /* Leave if the signal won't be shown */
| 168 | + if (!show_unhandled_signals ||
| 169 | + !unhandled_signal(tsk, signo) ||
| 170 | + !__ratelimit(&rs))
| 171 | + return;
249 | 172 |
250 | 173 | pr_info("%s[%d]: unhandled exception: ", tsk->comm, task_pid_nr(tsk));
251 | 174 | if (esr)
.. | ..
255 | 178 | print_vma_addr(KERN_CONT " in ", regs->pc);
256 | 179 | pr_cont("\n");
257 | 180 | __show_regs(regs);
| 181 | +}
258 | 182 |
259 | | -send_sig:
260 | | - force_sig_info(info->si_signo, info, tsk);
| 183 | +void arm64_force_sig_fault(int signo, int code, unsigned long far,
| 184 | + const char *str)
| 185 | +{
| 186 | + arm64_show_signal(signo, str);
| 187 | + if (signo == SIGKILL)
| 188 | + force_sig(SIGKILL);
| 189 | + else
| 190 | + force_sig_fault(signo, code, (void __user *)far);
| 191 | +}
| 192 | +
| 193 | +void arm64_force_sig_mceerr(int code, unsigned long far, short lsb,
| 194 | + const char *str)
| 195 | +{
| 196 | + arm64_show_signal(SIGBUS, str);
| 197 | + force_sig_mceerr(code, (void __user *)far, lsb);
| 198 | +}
| 199 | +
| 200 | +void arm64_force_sig_ptrace_errno_trap(int errno, unsigned long far,
| 201 | + const char *str)
| 202 | +{
| 203 | + arm64_show_signal(SIGTRAP, str);
| 204 | + force_sig_ptrace_errno_trap(errno, (void __user *)far);
261 | 205 | }
262 | 206 |
263 | 207 | void arm64_notify_die(const char *str, struct pt_regs *regs,
264 | | - struct siginfo *info, int err)
| 208 | + int signo, int sicode, unsigned long far,
| 209 | + int err)
265 | 210 | {
266 | 211 | if (user_mode(regs)) {
267 | 212 | WARN_ON(regs != current_pt_regs());
268 | 213 | current->thread.fault_address = 0;
269 | 214 | current->thread.fault_code = err;
270 | | - arm64_force_sig_info(info, str, current);
| 215 | +
| 216 | + arm64_force_sig_fault(signo, sicode, far, str);
271 | 217 | } else {
272 | 218 | die(str, regs, err);
273 | 219 | }
274 | 220 | }
| 221 | +
| 222 | +#ifdef CONFIG_COMPAT
| 223 | +#define PSTATE_IT_1_0_SHIFT 25
| 224 | +#define PSTATE_IT_1_0_MASK (0x3 << PSTATE_IT_1_0_SHIFT)
| 225 | +#define PSTATE_IT_7_2_SHIFT 10
| 226 | +#define PSTATE_IT_7_2_MASK (0x3f << PSTATE_IT_7_2_SHIFT)
| 227 | +
| 228 | +static u32 compat_get_it_state(struct pt_regs *regs)
| 229 | +{
| 230 | + u32 it, pstate = regs->pstate;
| 231 | +
| 232 | + it = (pstate & PSTATE_IT_1_0_MASK) >> PSTATE_IT_1_0_SHIFT;
| 233 | + it |= ((pstate & PSTATE_IT_7_2_MASK) >> PSTATE_IT_7_2_SHIFT) << 2;
| 234 | +
| 235 | + return it;
| 236 | +}
| 237 | +
| 238 | +static void compat_set_it_state(struct pt_regs *regs, u32 it)
| 239 | +{
| 240 | + u32 pstate_it;
| 241 | +
| 242 | + pstate_it = (it << PSTATE_IT_1_0_SHIFT) & PSTATE_IT_1_0_MASK;
| 243 | + pstate_it |= ((it >> 2) << PSTATE_IT_7_2_SHIFT) & PSTATE_IT_7_2_MASK;
| 244 | +
| 245 | + regs->pstate &= ~PSR_AA32_IT_MASK;
| 246 | + regs->pstate |= pstate_it;
| 247 | +}
| 248 | +
| 249 | +static void advance_itstate(struct pt_regs *regs)
| 250 | +{
| 251 | + u32 it;
| 252 | +
| 253 | + /* ARM mode */
| 254 | + if (!(regs->pstate & PSR_AA32_T_BIT) ||
| 255 | + !(regs->pstate & PSR_AA32_IT_MASK))
| 256 | + return;
| 257 | +
| 258 | + it = compat_get_it_state(regs);
| 259 | +
| 260 | + /*
| 261 | + * If this is the last instruction of the block, wipe the IT
| 262 | + * state. Otherwise advance it.
| 263 | + */
| 264 | + if (!(it & 7))
| 265 | + it = 0;
| 266 | + else
| 267 | + it = (it & 0xe0) | ((it << 1) & 0x1f);
| 268 | +
| 269 | + compat_set_it_state(regs, it);
| 270 | +}
| 271 | +#else
| 272 | +static void advance_itstate(struct pt_regs *regs)
| 273 | +{
| 274 | +}
| 275 | +#endif
275 | 276 |
276 | 277 | void arm64_skip_faulting_instruction(struct pt_regs *regs, unsigned long size)
277 | 278 | {
.. | ..
283 | 284 | */
284 | 285 | if (user_mode(regs))
285 | 286 | user_fastforward_single_step(current);
| 287 | +
| 288 | + if (compat_user_mode(regs))
| 289 | + advance_itstate(regs);
| 290 | + else
| 291 | + regs->pstate &= ~PSR_BTYPE_MASK;
286 | 292 | }
287 | 293 |
288 | 294 | static LIST_HEAD(undef_hook);
.. | ..
316 | 322 |
317 | 323 | if (!user_mode(regs)) {
318 | 324 | __le32 instr_le;
319 | | - if (probe_kernel_address((__force __le32 *)pc, instr_le))
| 325 | + if (get_kernel_nofault(instr_le, (__force __le32 *)pc))
320 | 326 | goto exit;
321 | 327 | instr = le32_to_cpu(instr_le);
322 | 328 | } else if (compat_thumb_mode(regs)) {
.. | ..
352 | 358 | return fn ? fn(regs, instr) : 1;
353 | 359 | }
354 | 360 |
355 | | -void force_signal_inject(int signal, int code, unsigned long address)
| 361 | +void force_signal_inject(int signal, int code, unsigned long address, unsigned int err)
356 | 362 | {
357 | | - siginfo_t info;
358 | 363 | const char *desc;
359 | 364 | struct pt_regs *regs = current_pt_regs();
360 | 365 |
361 | | - clear_siginfo(&info);
| 366 | + if (WARN_ON(!user_mode(regs)))
| 367 | + return;
362 | 368 |
363 | 369 | switch (signal) {
364 | 370 | case SIGILL:
.. | ..
378 | 384 | signal = SIGKILL;
379 | 385 | }
380 | 386 |
381 | | - info.si_signo = signal;
382 | | - info.si_errno = 0;
383 | | - info.si_code = code;
384 | | - info.si_addr = (void __user *)address;
385 | | -
386 | | - arm64_notify_die(desc, regs, &info, 0);
| 387 | + arm64_notify_die(desc, regs, signal, code, address, err);
387 | 388 | }
388 | 389 |
389 | 390 | /*
.. | ..
393 | 394 | {
394 | 395 | int code;
395 | 396 |
396 | | - down_read(&current->mm->mmap_sem);
397 | | - if (find_vma(current->mm, addr) == NULL)
| 397 | + mmap_read_lock(current->mm);
| 398 | + if (find_vma(current->mm, untagged_addr(addr)) == NULL)
398 | 399 | code = SEGV_MAPERR;
399 | 400 | else
400 | 401 | code = SEGV_ACCERR;
401 | | - up_read(&current->mm->mmap_sem);
| 402 | + mmap_read_unlock(current->mm);
402 | 403 |
403 | | - force_signal_inject(SIGSEGV, code, addr);
| 404 | + force_signal_inject(SIGSEGV, code, addr, 0);
404 | 405 | }
405 | 406 |
406 | | -asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
| 407 | +void do_undefinstr(struct pt_regs *regs)
407 | 408 | {
408 | 409 | /* check for AArch32 breakpoint instructions */
409 | 410 | if (!aarch32_break_handler(regs))
.. | ..
412 | 413 | if (call_undef_hook(regs) == 0)
413 | 414 | return;
414 | 415 |
415 | | - force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc);
| 416 | + trace_android_rvh_do_undefinstr(regs, user_mode(regs));
416 | 417 | BUG_ON(!user_mode(regs));
| 418 | + force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
417 | 419 | }
| 420 | +NOKPROBE_SYMBOL(do_undefinstr);
418 | 421 |
419 | | -void cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
| 422 | +void do_bti(struct pt_regs *regs)
420 | 423 | {
421 | | - sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCI, 0);
| 424 | + BUG_ON(!user_mode(regs));
| 425 | + force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
422 | 426 | }
| 427 | +NOKPROBE_SYMBOL(do_bti);
| 428 | +
| 429 | +void do_ptrauth_fault(struct pt_regs *regs, unsigned int esr)
| 430 | +{
| 431 | + /*
| 432 | + * Unexpected FPAC exception or pointer authentication failure in
| 433 | + * the kernel: kill the task before it does any more harm.
| 434 | + */
| 435 | + trace_android_rvh_do_ptrauth_fault(regs, esr, user_mode(regs));
| 436 | + BUG_ON(!user_mode(regs));
| 437 | + force_signal_inject(SIGILL, ILL_ILLOPN, regs->pc, esr);
| 438 | +}
| 439 | +NOKPROBE_SYMBOL(do_ptrauth_fault);
423 | 440 |
424 | 441 | #define __user_cache_maint(insn, address, res) \
425 | 442 | if (address >= user_addr_max()) { \
.. | ..
443 | 460 |
444 | 461 | static void user_cache_maint_handler(unsigned int esr, struct pt_regs *regs)
445 | 462 | {
446 | | - unsigned long address;
447 | | - int rt = (esr & ESR_ELx_SYS64_ISS_RT_MASK) >> ESR_ELx_SYS64_ISS_RT_SHIFT;
| 463 | + unsigned long tagged_address, address;
| 464 | + int rt = ESR_ELx_SYS64_ISS_RT(esr);
448 | 465 | int crm = (esr & ESR_ELx_SYS64_ISS_CRM_MASK) >> ESR_ELx_SYS64_ISS_CRM_SHIFT;
449 | 466 | int ret = 0;
450 | 467 |
451 | | - address = untagged_addr(pt_regs_read_reg(regs, rt));
| 468 | + tagged_address = pt_regs_read_reg(regs, rt);
| 469 | + address = untagged_addr(tagged_address);
452 | 470 |
453 | 471 | switch (crm) {
454 | 472 | case ESR_ELx_SYS64_ISS_CRM_DC_CVAU: /* DC CVAU, gets promoted */
.. | ..
456 | 474 | break;
457 | 475 | case ESR_ELx_SYS64_ISS_CRM_DC_CVAC: /* DC CVAC, gets promoted */
458 | 476 | __user_cache_maint("dc civac", address, ret);
| 477 | + break;
| 478 | + case ESR_ELx_SYS64_ISS_CRM_DC_CVADP: /* DC CVADP */
| 479 | + __user_cache_maint("sys 3, c7, c13, 1", address, ret);
459 | 480 | break;
460 | 481 | case ESR_ELx_SYS64_ISS_CRM_DC_CVAP: /* DC CVAP */
461 | 482 | __user_cache_maint("sys 3, c7, c12, 1", address, ret);
.. | ..
467 | 488 | __user_cache_maint("ic ivau", address, ret);
468 | 489 | break;
469 | 490 | default:
470 | | - force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc);
| 491 | + force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
471 | 492 | return;
472 | 493 | }
473 | 494 |
474 | 495 | if (ret)
475 | | - arm64_notify_segfault(address);
| 496 | + arm64_notify_segfault(tagged_address);
476 | 497 | else
477 | 498 | arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
478 | 499 | }
479 | 500 |
480 | 501 | static void ctr_read_handler(unsigned int esr, struct pt_regs *regs)
481 | 502 | {
482 | | - int rt = (esr & ESR_ELx_SYS64_ISS_RT_MASK) >> ESR_ELx_SYS64_ISS_RT_SHIFT;
| 503 | + int rt = ESR_ELx_SYS64_ISS_RT(esr);
483 | 504 | unsigned long val = arm64_ftr_reg_user_value(&arm64_ftr_reg_ctrel0);
484 | 505 |
485 | 506 | if (cpus_have_const_cap(ARM64_WORKAROUND_1542419)) {
.. | ..
498 | 519 |
499 | 520 | static void cntvct_read_handler(unsigned int esr, struct pt_regs *regs)
500 | 521 | {
501 | | - int rt = (esr & ESR_ELx_SYS64_ISS_RT_MASK) >> ESR_ELx_SYS64_ISS_RT_SHIFT;
| 522 | + int rt = ESR_ELx_SYS64_ISS_RT(esr);
502 | 523 |
503 | | - pt_regs_write_reg(regs, rt, arch_counter_get_cntvct());
| 524 | + pt_regs_write_reg(regs, rt, arch_timer_read_counter());
504 | 525 | arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
505 | 526 | }
506 | 527 |
507 | 528 | static void cntfrq_read_handler(unsigned int esr, struct pt_regs *regs)
508 | 529 | {
509 | | - int rt = (esr & ESR_ELx_SYS64_ISS_RT_MASK) >> ESR_ELx_SYS64_ISS_RT_SHIFT;
| 530 | + int rt = ESR_ELx_SYS64_ISS_RT(esr);
510 | 531 |
511 | 532 | pt_regs_write_reg(regs, rt, arch_timer_get_rate());
| 533 | + arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
| 534 | +}
| 535 | +
| 536 | +static void mrs_handler(unsigned int esr, struct pt_regs *regs)
| 537 | +{
| 538 | + u32 sysreg, rt;
| 539 | +
| 540 | + rt = ESR_ELx_SYS64_ISS_RT(esr);
| 541 | + sysreg = esr_sys64_to_sysreg(esr);
| 542 | +
| 543 | + if (do_emulate_mrs(regs, sysreg, rt) != 0)
| 544 | + force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
| 545 | +}
| 546 | +
| 547 | +static void wfi_handler(unsigned int esr, struct pt_regs *regs)
| 548 | +{
512 | 549 | arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
513 | 550 | }
514 | 551 |
.. | ..
518 | 555 | void (*handler)(unsigned int esr, struct pt_regs *regs);
519 | 556 | };
520 | 557 |
521 | | -static struct sys64_hook sys64_hooks[] = {
| 558 | +static const struct sys64_hook sys64_hooks[] = {
522 | 559 | {
523 | 560 | .esr_mask = ESR_ELx_SYS64_ISS_EL0_CACHE_OP_MASK,
524 | 561 | .esr_val = ESR_ELx_SYS64_ISS_EL0_CACHE_OP_VAL,
.. | ..
542 | 579 | .esr_val = ESR_ELx_SYS64_ISS_SYS_CNTFRQ,
543 | 580 | .handler = cntfrq_read_handler,
544 | 581 | },
| 582 | + {
| 583 | + /* Trap read access to CPUID registers */
| 584 | + .esr_mask = ESR_ELx_SYS64_ISS_SYS_MRS_OP_MASK,
| 585 | + .esr_val = ESR_ELx_SYS64_ISS_SYS_MRS_OP_VAL,
| 586 | + .handler = mrs_handler,
| 587 | + },
| 588 | + {
| 589 | + /* Trap WFI instructions executed in userspace */
| 590 | + .esr_mask = ESR_ELx_WFx_MASK,
| 591 | + .esr_val = ESR_ELx_WFx_WFI_VAL,
| 592 | + .handler = wfi_handler,
| 593 | + },
545 | 594 | {},
546 | 595 | };
547 | 596 |
548 | | -asmlinkage void __exception do_sysinstr(unsigned int esr, struct pt_regs *regs)
| 597 | +#ifdef CONFIG_COMPAT
| 598 | +static bool cp15_cond_valid(unsigned int esr, struct pt_regs *regs)
549 | 599 | {
550 | | - struct sys64_hook *hook;
| 600 | + int cond;
| 601 | +
| 602 | + /* Only a T32 instruction can trap without CV being set */
| 603 | + if (!(esr & ESR_ELx_CV)) {
| 604 | + u32 it;
| 605 | +
| 606 | + it = compat_get_it_state(regs);
| 607 | + if (!it)
| 608 | + return true;
| 609 | +
| 610 | + cond = it >> 4;
| 611 | + } else {
| 612 | + cond = (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT;
| 613 | + }
| 614 | +
| 615 | + return aarch32_opcode_cond_checks[cond](regs->pstate);
| 616 | +}
| 617 | +
| 618 | +static void compat_cntfrq_read_handler(unsigned int esr, struct pt_regs *regs)
| 619 | +{
| 620 | + int reg = (esr & ESR_ELx_CP15_32_ISS_RT_MASK) >> ESR_ELx_CP15_32_ISS_RT_SHIFT;
| 621 | +
| 622 | + pt_regs_write_reg(regs, reg, arch_timer_get_rate());
| 623 | + arm64_skip_faulting_instruction(regs, 4);
| 624 | +}
| 625 | +
| 626 | +static const struct sys64_hook cp15_32_hooks[] = {
| 627 | + {
| 628 | + .esr_mask = ESR_ELx_CP15_32_ISS_SYS_MASK,
| 629 | + .esr_val = ESR_ELx_CP15_32_ISS_SYS_CNTFRQ,
| 630 | + .handler = compat_cntfrq_read_handler,
| 631 | + },
| 632 | + {},
| 633 | +};
| 634 | +
| 635 | +static void compat_cntvct_read_handler(unsigned int esr, struct pt_regs *regs)
| 636 | +{
| 637 | + int rt = (esr & ESR_ELx_CP15_64_ISS_RT_MASK) >> ESR_ELx_CP15_64_ISS_RT_SHIFT;
| 638 | + int rt2 = (esr & ESR_ELx_CP15_64_ISS_RT2_MASK) >> ESR_ELx_CP15_64_ISS_RT2_SHIFT;
| 639 | + u64 val = arch_timer_read_counter();
| 640 | +
| 641 | + pt_regs_write_reg(regs, rt, lower_32_bits(val));
| 642 | + pt_regs_write_reg(regs, rt2, upper_32_bits(val));
| 643 | + arm64_skip_faulting_instruction(regs, 4);
| 644 | +}
| 645 | +
| 646 | +static const struct sys64_hook cp15_64_hooks[] = {
| 647 | + {
| 648 | + .esr_mask = ESR_ELx_CP15_64_ISS_SYS_MASK,
| 649 | + .esr_val = ESR_ELx_CP15_64_ISS_SYS_CNTVCT,
| 650 | + .handler = compat_cntvct_read_handler,
| 651 | + },
| 652 | + {},
| 653 | +};
| 654 | +
| 655 | +void do_cp15instr(unsigned int esr, struct pt_regs *regs)
| 656 | +{
| 657 | + const struct sys64_hook *hook, *hook_base;
| 658 | +
| 659 | + if (!cp15_cond_valid(esr, regs)) {
| 660 | + /*
| 661 | + * There is no T16 variant of a CP access, so we
| 662 | + * always advance PC by 4 bytes.
| 663 | + */
| 664 | + arm64_skip_faulting_instruction(regs, 4);
| 665 | + return;
| 666 | + }
| 667 | +
| 668 | + switch (ESR_ELx_EC(esr)) {
| 669 | + case ESR_ELx_EC_CP15_32:
| 670 | + hook_base = cp15_32_hooks;
| 671 | + break;
| 672 | + case ESR_ELx_EC_CP15_64:
| 673 | + hook_base = cp15_64_hooks;
| 674 | + break;
| 675 | + default:
| 676 | + do_undefinstr(regs);
| 677 | + return;
| 678 | + }
| 679 | +
| 680 | + for (hook = hook_base; hook->handler; hook++)
| 681 | + if ((hook->esr_mask & esr) == hook->esr_val) {
| 682 | + hook->handler(esr, regs);
| 683 | + return;
| 684 | + }
| 685 | +
| 686 | + /*
| 687 | + * New cp15 instructions may previously have been undefined at
| 688 | + * EL0. Fall back to our usual undefined instruction handler
| 689 | + * so that we handle these consistently.
| 690 | + */
| 691 | + do_undefinstr(regs);
| 692 | +}
| 693 | +NOKPROBE_SYMBOL(do_cp15instr);
| 694 | +#endif
| 695 | +
| 696 | +void do_sysinstr(unsigned int esr, struct pt_regs *regs)
| 697 | +{
| 698 | + const struct sys64_hook *hook;
551 | 699 |
552 | 700 | for (hook = sys64_hooks; hook->handler; hook++)
553 | 701 | if ((hook->esr_mask & esr) == hook->esr_val) {
.. | ..
562 | 710 | */
563 | 711 | do_undefinstr(regs);
564 | 712 | }
| 713 | +NOKPROBE_SYMBOL(do_sysinstr);
565 | 714 |
566 | 715 | static const char *esr_class_str[] = {
567 | 716 | [0 ... ESR_ELx_EC_MAX] = "UNRECOGNIZED EC",
.. | ..
573 | 722 | [ESR_ELx_EC_CP14_LS] = "CP14 LDC/STC",
574 | 723 | [ESR_ELx_EC_FP_ASIMD] = "ASIMD",
575 | 724 | [ESR_ELx_EC_CP10_ID] = "CP10 MRC/VMRS",
| 725 | + [ESR_ELx_EC_PAC] = "PAC",
576 | 726 | [ESR_ELx_EC_CP14_64] = "CP14 MCRR/MRRC",
| 727 | + [ESR_ELx_EC_BTI] = "BTI",
577 | 728 | [ESR_ELx_EC_ILL] = "PSTATE.IL",
578 | 729 | [ESR_ELx_EC_SVC32] = "SVC (AArch32)",
579 | 730 | [ESR_ELx_EC_HVC32] = "HVC (AArch32)",
.. | ..
583 | 734 | [ESR_ELx_EC_SMC64] = "SMC (AArch64)",
584 | 735 | [ESR_ELx_EC_SYS64] = "MSR/MRS (AArch64)",
585 | 736 | [ESR_ELx_EC_SVE] = "SVE",
| 737 | + [ESR_ELx_EC_ERET] = "ERET/ERETAA/ERETAB",
| 738 | + [ESR_ELx_EC_FPAC] = "FPAC",
586 | 739 | [ESR_ELx_EC_IMP_DEF] = "EL3 IMP DEF",
587 | 740 | [ESR_ELx_EC_IABT_LOW] = "IABT (lower EL)",
588 | 741 | [ESR_ELx_EC_IABT_CUR] = "IABT (current EL)",
.. | ..
613 | 766 | * bad_mode handles the impossible case in the exception vector. This is always
614 | 767 | * fatal.
615 | 768 | */
616 | | -asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
| 769 | +asmlinkage void notrace bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
617 | 770 | {
| 771 | + arm64_enter_nmi(regs);
| 772 | +
618 | 773 | console_verbose();
619 | 774 |
620 | 775 | pr_crit("Bad mode in %s handler detected on CPU%d, code 0x%08x -- %s\n",
621 | 776 | handler[reason], smp_processor_id(), esr,
622 | 777 | esr_get_class_string(esr));
623 | 778 |
| 779 | + trace_android_rvh_bad_mode(regs, esr, reason);
| 780 | + __show_regs(regs);
624 | 781 | local_daif_mask();
625 | 782 | panic("bad mode");
626 | 783 | }
.. | ..
629 | 786 | * bad_el0_sync handles unexpected, but potentially recoverable synchronous
630 | 787 | * exceptions taken from EL0. Unlike bad_mode, this returns.
631 | 788 | */
632 | | -asmlinkage void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr)
| 789 | +void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr)
633 | 790 | {
634 | | - siginfo_t info;
635 | | - void __user *pc = (void __user *)instruction_pointer(regs);
636 | | -
637 | | - clear_siginfo(&info);
638 | | - info.si_signo = SIGILL;
639 | | - info.si_errno = 0;
640 | | - info.si_code = ILL_ILLOPC;
641 | | - info.si_addr = pc;
| 791 | + unsigned long pc = instruction_pointer(regs);
642 | 792 |
643 | 793 | current->thread.fault_address = 0;
644 | 794 | current->thread.fault_code = esr;
645 | 795 |
646 | | - arm64_force_sig_info(&info, "Bad EL0 synchronous exception", current);
| 796 | + arm64_force_sig_fault(SIGILL, ILL_ILLOPC, pc,
| 797 | + "Bad EL0 synchronous exception");
647 | 798 | }
648 | 799 |
649 | 800 | #ifdef CONFIG_VMAP_STACK
.. | ..
651 | 802 | DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack)
652 | 803 | __aligned(16);
653 | 804 |
654 | | -asmlinkage void handle_bad_stack(struct pt_regs *regs)
| 805 | +asmlinkage void noinstr handle_bad_stack(struct pt_regs *regs)
655 | 806 | {
656 | 807 | unsigned long tsk_stk = (unsigned long)current->stack;
657 | 808 | unsigned long irq_stk = (unsigned long)this_cpu_read(irq_stack_ptr);
658 | 809 | unsigned long ovf_stk = (unsigned long)this_cpu_ptr(overflow_stack);
659 | 810 | unsigned int esr = read_sysreg(esr_el1);
660 | 811 | unsigned long far = read_sysreg(far_el1);
| 812 | +
| 813 | + arm64_enter_nmi(regs);
661 | 814 |
662 | 815 | console_verbose();
663 | 816 | pr_emerg("Insufficient stack space to handle exception!");
.. | ..
668 | 821 | pr_emerg("Task stack: [0x%016lx..0x%016lx]\n",
669 | 822 | tsk_stk, tsk_stk + THREAD_SIZE);
670 | 823 | pr_emerg("IRQ stack: [0x%016lx..0x%016lx]\n",
671 | | - irq_stk, irq_stk + THREAD_SIZE);
| 824 | + irq_stk, irq_stk + IRQ_STACK_SIZE);
672 | 825 | pr_emerg("Overflow stack: [0x%016lx..0x%016lx]\n",
673 | 826 | ovf_stk, ovf_stk + OVERFLOW_STACK_SIZE);
674 | 827 |
.. | ..
689 | 842 |
690 | 843 | pr_crit("SError Interrupt on CPU%d, code 0x%08x -- %s\n",
691 | 844 | smp_processor_id(), esr, esr_get_class_string(esr));
| 845 | +
| 846 | + trace_android_rvh_arm64_serror_panic(regs, esr);
692 | 847 | if (regs)
693 | 848 | __show_regs(regs);
694 | 849 |
.. | ..
716 | 871 | /*
717 | 872 | * The CPU can't make progress. The exception may have
718 | 873 | * been imprecise.
| 874 | + *
| 875 | + * Neoverse-N1 #1349291 means a non-KVM SError reported as
| 876 | + * Unrecoverable should be treated as Uncontainable. We
| 877 | + * call arm64_serror_panic() in both cases.
719 | 878 | */
720 | 879 | return true;
721 | 880 |
.. | ..
726 | 885 | }
727 | 886 | }
728 | 887 |
729 | | -asmlinkage void do_serror(struct pt_regs *regs, unsigned int esr)
| 888 | +asmlinkage void noinstr do_serror(struct pt_regs *regs, unsigned int esr)
730 | 889 | {
731 | | - nmi_enter();
| 890 | + arm64_enter_nmi(regs);
732 | 891 |
733 | 892 | /* non-RAS errors are not containable */
734 | 893 | if (!arm64_is_ras_serror(esr) || arm64_is_fatal_ras_serror(regs, esr))
735 | 894 | arm64_serror_panic(regs, esr);
736 | 895 |
737 | | - nmi_exit();
738 | | -}
739 | | -
740 | | -void __pte_error(const char *file, int line, unsigned long val)
741 | | -{
742 | | - pr_err("%s:%d: bad pte %016lx.\n", file, line, val);
743 | | -}
744 | | -
745 | | -void __pmd_error(const char *file, int line, unsigned long val)
746 | | -{
747 | | - pr_err("%s:%d: bad pmd %016lx.\n", file, line, val);
748 | | -}
749 | | -
750 | | -void __pud_error(const char *file, int line, unsigned long val)
751 | | -{
752 | | - pr_err("%s:%d: bad pud %016lx.\n", file, line, val);
753 | | -}
754 | | -
755 | | -void __pgd_error(const char *file, int line, unsigned long val)
756 | | -{
757 | | - pr_err("%s:%d: bad pgd %016lx.\n", file, line, val);
| 896 | + arm64_exit_nmi(regs);
758 | 897 | }
759 | 898 |
760 | 899 | /* GENERIC_BUG traps */
.. | ..
773 | 912 |
774 | 913 | static int bug_handler(struct pt_regs *regs, unsigned int esr)
775 | 914 | {
776 | | - if (user_mode(regs))
777 | | - return DBG_HOOK_ERROR;
778 | | -
779 | 915 | switch (report_bug(regs->pc, regs)) {
780 | 916 | case BUG_TRAP_TYPE_BUG:
781 | 917 | die("Oops - BUG", regs, 0);
.. | ..
795 | 931 | }
796 | 932 |
797 | 933 | static struct break_hook bug_break_hook = {
798 | | - .esr_val = 0xf2000000 | BUG_BRK_IMM,
799 | | - .esr_mask = 0xffffffff,
800 | 934 | .fn = bug_handler,
| 935 | + .imm = BUG_BRK_IMM,
| 936 | +};
| 937 | +
| 938 | +static int reserved_fault_handler(struct pt_regs *regs, unsigned int esr)
| 939 | +{
| 940 | + pr_err("%s generated an invalid instruction at %pS!\n",
| 941 | + "Kernel text patching",
| 942 | + (void *)instruction_pointer(regs));
| 943 | +
| 944 | + /* We cannot handle this */
| 945 | + return DBG_HOOK_ERROR;
| 946 | +}
| 947 | +
| 948 | +static struct break_hook fault_break_hook = {
| 949 | + .fn = reserved_fault_handler,
| 950 | + .imm = FAULT_BRK_IMM,
801 | 951 | };
802 | 952 |
803 | 953 | #ifdef CONFIG_KASAN_SW_TAGS
.. | ..
814 | 964 | size_t size = KASAN_ESR_SIZE(esr);
815 | 965 | u64 addr = regs->regs[0];
816 | 966 | u64 pc = regs->pc;
817 | | -
818 | | - if (user_mode(regs))
819 | | - return DBG_HOOK_ERROR;
820 | 967 |
821 | 968 | kasan_report(addr, size, write, pc);
822 | 969 |
.. | ..
842 | 989 | return DBG_HOOK_HANDLED;
843 | 990 | }
844 | 991 |
845 | | -#define KASAN_ESR_VAL (0xf2000000 | KASAN_BRK_IMM)
846 | | -#define KASAN_ESR_MASK 0xffffff00
847 | | -
848 | 992 | static struct break_hook kasan_break_hook = {
849 | | - .esr_val = KASAN_ESR_VAL,
850 | | - .esr_mask = KASAN_ESR_MASK,
851 | | - .fn = kasan_handler,
| 993 | + .fn = kasan_handler,
| 994 | + .imm = KASAN_BRK_IMM,
| 995 | + .mask = KASAN_BRK_MASK,
852 | 996 | };
853 | 997 | #endif
854 | 998 |
.. | ..
860 | 1004 | struct pt_regs *regs)
861 | 1005 | {
862 | 1006 | #ifdef CONFIG_KASAN_SW_TAGS
863 | | - if ((esr & KASAN_ESR_MASK) == KASAN_ESR_VAL)
| 1007 | + unsigned int comment = esr & ESR_ELx_BRK64_ISS_COMMENT_MASK;
| 1008 | +
| 1009 | + if ((comment & ~KASAN_BRK_MASK) == KASAN_BRK_IMM)
864 | 1010 | return kasan_handler(regs, esr) != DBG_HOOK_HANDLED;
865 | 1011 | #endif
866 | 1012 | return bug_handler(regs, esr) != DBG_HOOK_HANDLED;
867 | 1013 | }
868 | 1014 |
869 | | -/* This registration must happen early, before debug_traps_init(). */
870 | 1015 | void __init trap_init(void)
871 | 1016 | {
872 | | - register_break_hook(&bug_break_hook);
| 1017 | + register_kernel_break_hook(&bug_break_hook);
| 1018 | + register_kernel_break_hook(&fault_break_hook);
873 | 1019 | #ifdef CONFIG_KASAN_SW_TAGS
874 | | - register_break_hook(&kasan_break_hook);
| 1020 | + register_kernel_break_hook(&kasan_break_hook);
875 | 1021 | #endif
| 1022 | + debug_traps_init();
876 | 1023 | }