forked from ~ljy/RK356X_SDK_RELEASE

hc
2023-12-08 01573e231f18eb2d99162747186f59511f56b64d
--- a/kernel/arch/arm64/kernel/traps.c
+++ b/kernel/arch/arm64/kernel/traps.c
@@ -1,26 +1,17 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Based on arch/arm/kernel/traps.c
  *
  * Copyright (C) 1995-2009 Russell King
  * Copyright (C) 2012 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
  */

 #include <linux/bug.h>
+#include <linux/context_tracking.h>
 #include <linux/signal.h>
 #include <linux/personality.h>
 #include <linux/kallsyms.h>
+#include <linux/kprobes.h>
 #include <linux/spinlock.h>
 #include <linux/uaccess.h>
 #include <linux/hardirq.h>
@@ -43,7 +34,10 @@
 #include <asm/daifflags.h>
 #include <asm/debug-monitors.h>
 #include <asm/esr.h>
+#include <asm/exception.h>
+#include <asm/extable.h>
 #include <asm/insn.h>
+#include <asm/kprobes.h>
 #include <asm/traps.h>
 #include <asm/smp.h>
 #include <asm/stack_pointer.h>
@@ -51,6 +45,8 @@
 #include <asm/exception.h>
 #include <asm/system_misc.h>
 #include <asm/sysreg.h>
+
+#include <trace/hooks/traps.h>

 static const char *handler[]= {
 	"Synchronous Abort",
@@ -61,21 +57,19 @@

 int show_unhandled_signals = 0;

-static void dump_backtrace_entry(unsigned long where)
-{
-	printk(" %pS\n", (void *)where);
-}
-
-static void __dump_instr(const char *lvl, struct pt_regs *regs)
+static void dump_kernel_instr(const char *lvl, struct pt_regs *regs)
 {
 	unsigned long addr = instruction_pointer(regs);
 	char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str;
 	int i;

+	if (user_mode(regs))
+		return;
+
 	for (i = -4; i < 1; i++) {
 		unsigned int val, bad;

-		bad = get_user(val, &((u32 *)addr)[i]);
+		bad = aarch64_insn_read(&((u32 *)addr)[i], &val);

 		if (!bad)
 			p += sprintf(p, i == 0 ? "(%08x) " : "%08x ", val);
@@ -84,91 +78,22 @@
 			break;
 		}
 	}
+
 	printk("%sCode: %s\n", lvl, str);
-}
-
-static void dump_instr(const char *lvl, struct pt_regs *regs)
-{
-	if (!user_mode(regs)) {
-		mm_segment_t fs = get_fs();
-		set_fs(KERNEL_DS);
-		__dump_instr(lvl, regs);
-		set_fs(fs);
-	} else {
-		__dump_instr(lvl, regs);
-	}
-}
-
-void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
-{
-	struct stackframe frame;
-	int skip = 0;
-
-	pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);
-
-	if (regs) {
-		if (user_mode(regs))
-			return;
-		skip = 1;
-	}
-
-	if (!tsk)
-		tsk = current;
-
-	if (!try_get_task_stack(tsk))
-		return;
-
-	if (tsk == current) {
-		frame.fp = (unsigned long)__builtin_frame_address(0);
-		frame.pc = (unsigned long)dump_backtrace;
-	} else {
-		/*
-		 * task blocked in __switch_to
-		 */
-		frame.fp = thread_saved_fp(tsk);
-		frame.pc = thread_saved_pc(tsk);
-	}
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-	frame.graph = tsk->curr_ret_stack;
-#endif
-
-	printk("Call trace:\n");
-	do {
-		/* skip until specified stack frame */
-		if (!skip) {
-			dump_backtrace_entry(frame.pc);
-		} else if (frame.fp == regs->regs[29]) {
-			skip = 0;
-			/*
-			 * Mostly, this is the case where this function is
-			 * called in panic/abort. As exception handler's
-			 * stack frame does not contain the corresponding pc
-			 * at which an exception has taken place, use regs->pc
-			 * instead.
-			 */
-			dump_backtrace_entry(regs->pc);
-		}
-	} while (!unwind_frame(tsk, &frame));
-
-	put_task_stack(tsk);
-}
-
-void show_stack(struct task_struct *tsk, unsigned long *sp)
-{
-	dump_backtrace(NULL, tsk);
-	barrier();
 }

 #ifdef CONFIG_PREEMPT
 #define S_PREEMPT " PREEMPT"
+#elif defined(CONFIG_PREEMPT_RT)
+#define S_PREEMPT " PREEMPT_RT"
 #else
 #define S_PREEMPT ""
 #endif
+
 #define S_SMP " SMP"

 static int __die(const char *str, int err, struct pt_regs *regs)
 {
-	struct task_struct *tsk = current;
 	static int die_counter;
 	int ret;

@@ -181,13 +106,9 @@
 		return ret;

 	print_modules();
-	pr_emerg("Process %.*s (pid: %d, stack limit = 0x%p)\n",
-		 TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk),
-		 end_of_stack(tsk));
 	show_regs(regs);

-	if (!user_mode(regs))
-		dump_instr(KERN_EMERG, regs);
+	dump_kernel_instr(KERN_EMERG, regs);

 	return ret;
 }
@@ -218,9 +139,9 @@
 	oops_exit();

 	if (in_interrupt())
-		panic("Fatal exception in interrupt");
+		panic("%s: Fatal exception in interrupt", str);
 	if (panic_on_oops)
-		panic("Fatal exception");
+		panic("%s: Fatal exception", str);

 	raw_spin_unlock_irqrestore(&die_lock, flags);

@@ -228,24 +149,19 @@
 		do_exit(SIGSEGV);
 }

-static bool show_unhandled_signals_ratelimited(void)
+static void arm64_show_signal(int signo, const char *str)
 {
 	static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
 				      DEFAULT_RATELIMIT_BURST);
-	return show_unhandled_signals && __ratelimit(&rs);
-}
-
-void arm64_force_sig_info(struct siginfo *info, const char *str,
-			  struct task_struct *tsk)
-{
+	struct task_struct *tsk = current;
 	unsigned int esr = tsk->thread.fault_code;
 	struct pt_regs *regs = task_pt_regs(tsk);

-	if (!unhandled_signal(tsk, info->si_signo))
-		goto send_sig;
-
-	if (!show_unhandled_signals_ratelimited())
-		goto send_sig;
+	/* Leave if the signal won't be shown */
+	if (!show_unhandled_signals ||
+	    !unhandled_signal(tsk, signo) ||
+	    !__ratelimit(&rs))
+		return;

 	pr_info("%s[%d]: unhandled exception: ", tsk->comm, task_pid_nr(tsk));
 	if (esr)
@@ -255,23 +171,101 @@
 	print_vma_addr(KERN_CONT " in ", regs->pc);
 	pr_cont("\n");
 	__show_regs(regs);
+}

-send_sig:
-	force_sig_info(info->si_signo, info, tsk);
+void arm64_force_sig_fault(int signo, int code, unsigned long far,
+			   const char *str)
+{
+	arm64_show_signal(signo, str);
+	if (signo == SIGKILL)
+		force_sig(SIGKILL);
+	else
+		force_sig_fault(signo, code, (void __user *)far);
+}
+
+void arm64_force_sig_mceerr(int code, unsigned long far, short lsb,
+			    const char *str)
+{
+	arm64_show_signal(SIGBUS, str);
+	force_sig_mceerr(code, (void __user *)far, lsb);
+}
+
+void arm64_force_sig_ptrace_errno_trap(int errno, unsigned long far,
+				       const char *str)
+{
+	arm64_show_signal(SIGTRAP, str);
+	force_sig_ptrace_errno_trap(errno, (void __user *)far);
 }

 void arm64_notify_die(const char *str, struct pt_regs *regs,
-		      struct siginfo *info, int err)
+		      int signo, int sicode, unsigned long far,
+		      int err)
 {
 	if (user_mode(regs)) {
 		WARN_ON(regs != current_pt_regs());
 		current->thread.fault_address = 0;
 		current->thread.fault_code = err;
-		arm64_force_sig_info(info, str, current);
+
+		arm64_force_sig_fault(signo, sicode, far, str);
 	} else {
 		die(str, regs, err);
 	}
 }
+
+#ifdef CONFIG_COMPAT
+#define PSTATE_IT_1_0_SHIFT 25
+#define PSTATE_IT_1_0_MASK (0x3 << PSTATE_IT_1_0_SHIFT)
+#define PSTATE_IT_7_2_SHIFT 10
+#define PSTATE_IT_7_2_MASK (0x3f << PSTATE_IT_7_2_SHIFT)
+
+static u32 compat_get_it_state(struct pt_regs *regs)
+{
+	u32 it, pstate = regs->pstate;
+
+	it = (pstate & PSTATE_IT_1_0_MASK) >> PSTATE_IT_1_0_SHIFT;
+	it |= ((pstate & PSTATE_IT_7_2_MASK) >> PSTATE_IT_7_2_SHIFT) << 2;
+
+	return it;
+}
+
+static void compat_set_it_state(struct pt_regs *regs, u32 it)
+{
+	u32 pstate_it;
+
+	pstate_it = (it << PSTATE_IT_1_0_SHIFT) & PSTATE_IT_1_0_MASK;
+	pstate_it |= ((it >> 2) << PSTATE_IT_7_2_SHIFT) & PSTATE_IT_7_2_MASK;
+
+	regs->pstate &= ~PSR_AA32_IT_MASK;
+	regs->pstate |= pstate_it;
+}
+
+static void advance_itstate(struct pt_regs *regs)
+{
+	u32 it;
+
+	/* ARM mode */
+	if (!(regs->pstate & PSR_AA32_T_BIT) ||
+	    !(regs->pstate & PSR_AA32_IT_MASK))
+		return;
+
+	it = compat_get_it_state(regs);
+
+	/*
+	 * If this is the last instruction of the block, wipe the IT
+	 * state. Otherwise advance it.
+	 */
+	if (!(it & 7))
+		it = 0;
+	else
+		it = (it & 0xe0) | ((it << 1) & 0x1f);
+
+	compat_set_it_state(regs, it);
+}
+#else
+static void advance_itstate(struct pt_regs *regs)
+{
+}
+#endif

 void arm64_skip_faulting_instruction(struct pt_regs *regs, unsigned long size)
 {
@@ -283,6 +277,11 @@
 	 */
 	if (user_mode(regs))
 		user_fastforward_single_step(current);
+
+	if (compat_user_mode(regs))
+		advance_itstate(regs);
+	else
+		regs->pstate &= ~PSR_BTYPE_MASK;
 }

 static LIST_HEAD(undef_hook);
@@ -316,7 +315,7 @@

 	if (!user_mode(regs)) {
 		__le32 instr_le;
-		if (probe_kernel_address((__force __le32 *)pc, instr_le))
+		if (get_kernel_nofault(instr_le, (__force __le32 *)pc))
 			goto exit;
 		instr = le32_to_cpu(instr_le);
 	} else if (compat_thumb_mode(regs)) {
@@ -352,13 +351,13 @@
 	return fn ? fn(regs, instr) : 1;
 }

-void force_signal_inject(int signal, int code, unsigned long address)
+void force_signal_inject(int signal, int code, unsigned long address, unsigned int err)
 {
-	siginfo_t info;
 	const char *desc;
 	struct pt_regs *regs = current_pt_regs();

-	clear_siginfo(&info);
+	if (WARN_ON(!user_mode(regs)))
+		return;

 	switch (signal) {
 	case SIGILL:
@@ -378,12 +377,7 @@
 		signal = SIGKILL;
 	}

-	info.si_signo = signal;
-	info.si_errno = 0;
-	info.si_code = code;
-	info.si_addr = (void __user *)address;
-
-	arm64_notify_die(desc, regs, &info, 0);
+	arm64_notify_die(desc, regs, signal, code, address, err);
 }

 /*
@@ -393,17 +387,17 @@
 {
 	int code;

-	down_read(&current->mm->mmap_sem);
-	if (find_vma(current->mm, addr) == NULL)
+	mmap_read_lock(current->mm);
+	if (find_vma(current->mm, untagged_addr(addr)) == NULL)
 		code = SEGV_MAPERR;
 	else
 		code = SEGV_ACCERR;
-	up_read(&current->mm->mmap_sem);
+	mmap_read_unlock(current->mm);

-	force_signal_inject(SIGSEGV, code, addr);
+	force_signal_inject(SIGSEGV, code, addr, 0);
 }

-asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
+void do_undefinstr(struct pt_regs *regs)
 {
 	/* check for AArch32 breakpoint instructions */
 	if (!aarch32_break_handler(regs))
@@ -412,14 +406,30 @@
 	if (call_undef_hook(regs) == 0)
 		return;

-	force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc);
+	trace_android_rvh_do_undefinstr(regs, user_mode(regs));
 	BUG_ON(!user_mode(regs));
+	force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
 }
+NOKPROBE_SYMBOL(do_undefinstr);

-void cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
+void do_bti(struct pt_regs *regs)
 {
-	sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCI, 0);
+	BUG_ON(!user_mode(regs));
+	force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
 }
+NOKPROBE_SYMBOL(do_bti);
+
+void do_ptrauth_fault(struct pt_regs *regs, unsigned int esr)
+{
+	/*
+	 * Unexpected FPAC exception or pointer authentication failure in
+	 * the kernel: kill the task before it does any more harm.
+	 */
+	trace_android_rvh_do_ptrauth_fault(regs, esr, user_mode(regs));
+	BUG_ON(!user_mode(regs));
+	force_signal_inject(SIGILL, ILL_ILLOPN, regs->pc, esr);
+}
+NOKPROBE_SYMBOL(do_ptrauth_fault);

 #define __user_cache_maint(insn, address, res) \
 	if (address >= user_addr_max()) { \
@@ -443,12 +453,13 @@

 static void user_cache_maint_handler(unsigned int esr, struct pt_regs *regs)
 {
-	unsigned long address;
-	int rt = (esr & ESR_ELx_SYS64_ISS_RT_MASK) >> ESR_ELx_SYS64_ISS_RT_SHIFT;
+	unsigned long tagged_address, address;
+	int rt = ESR_ELx_SYS64_ISS_RT(esr);
 	int crm = (esr & ESR_ELx_SYS64_ISS_CRM_MASK) >> ESR_ELx_SYS64_ISS_CRM_SHIFT;
 	int ret = 0;

-	address = untagged_addr(pt_regs_read_reg(regs, rt));
+	tagged_address = pt_regs_read_reg(regs, rt);
+	address = untagged_addr(tagged_address);

 	switch (crm) {
 	case ESR_ELx_SYS64_ISS_CRM_DC_CVAU: /* DC CVAU, gets promoted */
@@ -456,6 +467,9 @@
 		break;
 	case ESR_ELx_SYS64_ISS_CRM_DC_CVAC: /* DC CVAC, gets promoted */
 		__user_cache_maint("dc civac", address, ret);
+		break;
+	case ESR_ELx_SYS64_ISS_CRM_DC_CVADP: /* DC CVADP */
+		__user_cache_maint("sys 3, c7, c13, 1", address, ret);
 		break;
 	case ESR_ELx_SYS64_ISS_CRM_DC_CVAP: /* DC CVAP */
 		__user_cache_maint("sys 3, c7, c12, 1", address, ret);
@@ -467,19 +481,19 @@
 		__user_cache_maint("ic ivau", address, ret);
 		break;
 	default:
-		force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc);
+		force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
 		return;
 	}

 	if (ret)
-		arm64_notify_segfault(address);
+		arm64_notify_segfault(tagged_address);
 	else
 		arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
 }

 static void ctr_read_handler(unsigned int esr, struct pt_regs *regs)
 {
-	int rt = (esr & ESR_ELx_SYS64_ISS_RT_MASK) >> ESR_ELx_SYS64_ISS_RT_SHIFT;
+	int rt = ESR_ELx_SYS64_ISS_RT(esr);
 	unsigned long val = arm64_ftr_reg_user_value(&arm64_ftr_reg_ctrel0);

 	if (cpus_have_const_cap(ARM64_WORKAROUND_1542419)) {
@@ -498,17 +512,33 @@

 static void cntvct_read_handler(unsigned int esr, struct pt_regs *regs)
 {
-	int rt = (esr & ESR_ELx_SYS64_ISS_RT_MASK) >> ESR_ELx_SYS64_ISS_RT_SHIFT;
+	int rt = ESR_ELx_SYS64_ISS_RT(esr);

-	pt_regs_write_reg(regs, rt, arch_counter_get_cntvct());
+	pt_regs_write_reg(regs, rt, arch_timer_read_counter());
 	arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
 }

 static void cntfrq_read_handler(unsigned int esr, struct pt_regs *regs)
 {
-	int rt = (esr & ESR_ELx_SYS64_ISS_RT_MASK) >> ESR_ELx_SYS64_ISS_RT_SHIFT;
+	int rt = ESR_ELx_SYS64_ISS_RT(esr);

 	pt_regs_write_reg(regs, rt, arch_timer_get_rate());
+	arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
+}
+
+static void mrs_handler(unsigned int esr, struct pt_regs *regs)
+{
+	u32 sysreg, rt;
+
+	rt = ESR_ELx_SYS64_ISS_RT(esr);
+	sysreg = esr_sys64_to_sysreg(esr);
+
+	if (do_emulate_mrs(regs, sysreg, rt) != 0)
+		force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
+}
+
+static void wfi_handler(unsigned int esr, struct pt_regs *regs)
+{
 	arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
 }

@@ -518,7 +548,7 @@
 	void (*handler)(unsigned int esr, struct pt_regs *regs);
 };

-static struct sys64_hook sys64_hooks[] = {
+static const struct sys64_hook sys64_hooks[] = {
 	{
 		.esr_mask = ESR_ELx_SYS64_ISS_EL0_CACHE_OP_MASK,
 		.esr_val = ESR_ELx_SYS64_ISS_EL0_CACHE_OP_VAL,
@@ -542,12 +572,123 @@
 		.esr_val = ESR_ELx_SYS64_ISS_SYS_CNTFRQ,
 		.handler = cntfrq_read_handler,
 	},
+	{
+		/* Trap read access to CPUID registers */
+		.esr_mask = ESR_ELx_SYS64_ISS_SYS_MRS_OP_MASK,
+		.esr_val = ESR_ELx_SYS64_ISS_SYS_MRS_OP_VAL,
+		.handler = mrs_handler,
+	},
+	{
+		/* Trap WFI instructions executed in userspace */
+		.esr_mask = ESR_ELx_WFx_MASK,
+		.esr_val = ESR_ELx_WFx_WFI_VAL,
+		.handler = wfi_handler,
+	},
 	{},
 };

-asmlinkage void __exception do_sysinstr(unsigned int esr, struct pt_regs *regs)
+#ifdef CONFIG_COMPAT
+static bool cp15_cond_valid(unsigned int esr, struct pt_regs *regs)
 {
-	struct sys64_hook *hook;
+	int cond;
+
+	/* Only a T32 instruction can trap without CV being set */
+	if (!(esr & ESR_ELx_CV)) {
+		u32 it;
+
+		it = compat_get_it_state(regs);
+		if (!it)
+			return true;
+
+		cond = it >> 4;
+	} else {
+		cond = (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT;
+	}
+
+	return aarch32_opcode_cond_checks[cond](regs->pstate);
+}
+
+static void compat_cntfrq_read_handler(unsigned int esr, struct pt_regs *regs)
+{
+	int reg = (esr & ESR_ELx_CP15_32_ISS_RT_MASK) >> ESR_ELx_CP15_32_ISS_RT_SHIFT;
+
+	pt_regs_write_reg(regs, reg, arch_timer_get_rate());
+	arm64_skip_faulting_instruction(regs, 4);
+}
+
+static const struct sys64_hook cp15_32_hooks[] = {
+	{
+		.esr_mask = ESR_ELx_CP15_32_ISS_SYS_MASK,
+		.esr_val = ESR_ELx_CP15_32_ISS_SYS_CNTFRQ,
+		.handler = compat_cntfrq_read_handler,
+	},
+	{},
+};
+
+static void compat_cntvct_read_handler(unsigned int esr, struct pt_regs *regs)
+{
+	int rt = (esr & ESR_ELx_CP15_64_ISS_RT_MASK) >> ESR_ELx_CP15_64_ISS_RT_SHIFT;
+	int rt2 = (esr & ESR_ELx_CP15_64_ISS_RT2_MASK) >> ESR_ELx_CP15_64_ISS_RT2_SHIFT;
+	u64 val = arch_timer_read_counter();
+
+	pt_regs_write_reg(regs, rt, lower_32_bits(val));
+	pt_regs_write_reg(regs, rt2, upper_32_bits(val));
+	arm64_skip_faulting_instruction(regs, 4);
+}
+
+static const struct sys64_hook cp15_64_hooks[] = {
+	{
+		.esr_mask = ESR_ELx_CP15_64_ISS_SYS_MASK,
+		.esr_val = ESR_ELx_CP15_64_ISS_SYS_CNTVCT,
+		.handler = compat_cntvct_read_handler,
+	},
+	{},
+};
+
+void do_cp15instr(unsigned int esr, struct pt_regs *regs)
+{
+	const struct sys64_hook *hook, *hook_base;
+
+	if (!cp15_cond_valid(esr, regs)) {
+		/*
+		 * There is no T16 variant of a CP access, so we
+		 * always advance PC by 4 bytes.
+		 */
+		arm64_skip_faulting_instruction(regs, 4);
+		return;
+	}
+
+	switch (ESR_ELx_EC(esr)) {
+	case ESR_ELx_EC_CP15_32:
+		hook_base = cp15_32_hooks;
+		break;
+	case ESR_ELx_EC_CP15_64:
+		hook_base = cp15_64_hooks;
+		break;
+	default:
+		do_undefinstr(regs);
+		return;
+	}
+
+	for (hook = hook_base; hook->handler; hook++)
+		if ((hook->esr_mask & esr) == hook->esr_val) {
+			hook->handler(esr, regs);
+			return;
+		}
+
+	/*
+	 * New cp15 instructions may previously have been undefined at
+	 * EL0. Fall back to our usual undefined instruction handler
+	 * so that we handle these consistently.
+	 */
+	do_undefinstr(regs);
+}
+NOKPROBE_SYMBOL(do_cp15instr);
+#endif
+
+void do_sysinstr(unsigned int esr, struct pt_regs *regs)
+{
+	const struct sys64_hook *hook;

 	for (hook = sys64_hooks; hook->handler; hook++)
 		if ((hook->esr_mask & esr) == hook->esr_val) {
@@ -562,6 +703,7 @@
 	 */
 	do_undefinstr(regs);
 }
+NOKPROBE_SYMBOL(do_sysinstr);

 static const char *esr_class_str[] = {
 	[0 ... ESR_ELx_EC_MAX] = "UNRECOGNIZED EC",
@@ -573,7 +715,9 @@
 	[ESR_ELx_EC_CP14_LS] = "CP14 LDC/STC",
 	[ESR_ELx_EC_FP_ASIMD] = "ASIMD",
 	[ESR_ELx_EC_CP10_ID] = "CP10 MRC/VMRS",
+	[ESR_ELx_EC_PAC] = "PAC",
 	[ESR_ELx_EC_CP14_64] = "CP14 MCRR/MRRC",
+	[ESR_ELx_EC_BTI] = "BTI",
 	[ESR_ELx_EC_ILL] = "PSTATE.IL",
 	[ESR_ELx_EC_SVC32] = "SVC (AArch32)",
 	[ESR_ELx_EC_HVC32] = "HVC (AArch32)",
@@ -583,6 +727,8 @@
 	[ESR_ELx_EC_SMC64] = "SMC (AArch64)",
 	[ESR_ELx_EC_SYS64] = "MSR/MRS (AArch64)",
 	[ESR_ELx_EC_SVE] = "SVE",
+	[ESR_ELx_EC_ERET] = "ERET/ERETAA/ERETAB",
+	[ESR_ELx_EC_FPAC] = "FPAC",
 	[ESR_ELx_EC_IMP_DEF] = "EL3 IMP DEF",
 	[ESR_ELx_EC_IABT_LOW] = "IABT (lower EL)",
 	[ESR_ELx_EC_IABT_CUR] = "IABT (current EL)",
@@ -613,14 +759,18 @@
  * bad_mode handles the impossible case in the exception vector. This is always
  * fatal.
  */
-asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
+asmlinkage void notrace bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
 {
+	arm64_enter_nmi(regs);
+
 	console_verbose();

 	pr_crit("Bad mode in %s handler detected on CPU%d, code 0x%08x -- %s\n",
 		handler[reason], smp_processor_id(), esr,
 		esr_get_class_string(esr));

+	trace_android_rvh_bad_mode(regs, esr, reason);
+	__show_regs(regs);
 	local_daif_mask();
 	panic("bad mode");
 }
@@ -629,21 +779,15 @@
  * bad_el0_sync handles unexpected, but potentially recoverable synchronous
  * exceptions taken from EL0. Unlike bad_mode, this returns.
  */
-asmlinkage void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr)
+void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr)
 {
-	siginfo_t info;
-	void __user *pc = (void __user *)instruction_pointer(regs);
-
-	clear_siginfo(&info);
-	info.si_signo = SIGILL;
-	info.si_errno = 0;
-	info.si_code = ILL_ILLOPC;
-	info.si_addr = pc;
+	unsigned long pc = instruction_pointer(regs);

 	current->thread.fault_address = 0;
 	current->thread.fault_code = esr;

-	arm64_force_sig_info(&info, "Bad EL0 synchronous exception", current);
+	arm64_force_sig_fault(SIGILL, ILL_ILLOPC, pc,
+			      "Bad EL0 synchronous exception");
 }

 #ifdef CONFIG_VMAP_STACK
@@ -651,13 +795,15 @@
 DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack)
 	__aligned(16);

-asmlinkage void handle_bad_stack(struct pt_regs *regs)
+asmlinkage void noinstr handle_bad_stack(struct pt_regs *regs)
 {
 	unsigned long tsk_stk = (unsigned long)current->stack;
 	unsigned long irq_stk = (unsigned long)this_cpu_read(irq_stack_ptr);
 	unsigned long ovf_stk = (unsigned long)this_cpu_ptr(overflow_stack);
 	unsigned int esr = read_sysreg(esr_el1);
 	unsigned long far = read_sysreg(far_el1);
+
+	arm64_enter_nmi(regs);

 	console_verbose();
 	pr_emerg("Insufficient stack space to handle exception!");
@@ -668,7 +814,7 @@
 	pr_emerg("Task stack: [0x%016lx..0x%016lx]\n",
 		 tsk_stk, tsk_stk + THREAD_SIZE);
 	pr_emerg("IRQ stack: [0x%016lx..0x%016lx]\n",
-		 irq_stk, irq_stk + THREAD_SIZE);
+		 irq_stk, irq_stk + IRQ_STACK_SIZE);
 	pr_emerg("Overflow stack: [0x%016lx..0x%016lx]\n",
 		 ovf_stk, ovf_stk + OVERFLOW_STACK_SIZE);

@@ -689,6 +835,8 @@

 	pr_crit("SError Interrupt on CPU%d, code 0x%08x -- %s\n",
 		smp_processor_id(), esr, esr_get_class_string(esr));
+
+	trace_android_rvh_arm64_serror_panic(regs, esr);
 	if (regs)
 		__show_regs(regs);

@@ -716,6 +864,10 @@
 		/*
 		 * The CPU can't make progress. The exception may have
 		 * been imprecise.
+		 *
+		 * Neoverse-N1 #1349291 means a non-KVM SError reported as
+		 * Unrecoverable should be treated as Uncontainable. We
+		 * call arm64_serror_panic() in both cases.
 		 */
 		return true;

@@ -726,35 +878,15 @@
 	}
 }

-asmlinkage void do_serror(struct pt_regs *regs, unsigned int esr)
+asmlinkage void noinstr do_serror(struct pt_regs *regs, unsigned int esr)
 {
-	nmi_enter();
+	arm64_enter_nmi(regs);

 	/* non-RAS errors are not containable */
 	if (!arm64_is_ras_serror(esr) || arm64_is_fatal_ras_serror(regs, esr))
 		arm64_serror_panic(regs, esr);

-	nmi_exit();
-}
-
-void __pte_error(const char *file, int line, unsigned long val)
-{
-	pr_err("%s:%d: bad pte %016lx.\n", file, line, val);
-}
-
-void __pmd_error(const char *file, int line, unsigned long val)
-{
-	pr_err("%s:%d: bad pmd %016lx.\n", file, line, val);
-}
-
-void __pud_error(const char *file, int line, unsigned long val)
-{
-	pr_err("%s:%d: bad pud %016lx.\n", file, line, val);
-}
-
-void __pgd_error(const char *file, int line, unsigned long val)
-{
-	pr_err("%s:%d: bad pgd %016lx.\n", file, line, val);
+	arm64_exit_nmi(regs);
 }

 /* GENERIC_BUG traps */
@@ -773,9 +905,6 @@

 static int bug_handler(struct pt_regs *regs, unsigned int esr)
 {
-	if (user_mode(regs))
-		return DBG_HOOK_ERROR;
-
 	switch (report_bug(regs->pc, regs)) {
 	case BUG_TRAP_TYPE_BUG:
 		die("Oops - BUG", regs, 0);
@@ -795,9 +924,23 @@
 }

 static struct break_hook bug_break_hook = {
-	.esr_val = 0xf2000000 | BUG_BRK_IMM,
-	.esr_mask = 0xffffffff,
 	.fn = bug_handler,
+	.imm = BUG_BRK_IMM,
+};
+
+static int reserved_fault_handler(struct pt_regs *regs, unsigned int esr)
+{
+	pr_err("%s generated an invalid instruction at %pS!\n",
+		"Kernel text patching",
+		(void *)instruction_pointer(regs));
+
+	/* We cannot handle this */
+	return DBG_HOOK_ERROR;
+}
+
+static struct break_hook fault_break_hook = {
+	.fn = reserved_fault_handler,
+	.imm = FAULT_BRK_IMM,
 };

 #ifdef CONFIG_KASAN_SW_TAGS
803946 #ifdef CONFIG_KASAN_SW_TAGS
....@@ -814,9 +957,6 @@
814957 size_t size = KASAN_ESR_SIZE(esr);
815958 u64 addr = regs->regs[0];
816959 u64 pc = regs->pc;
817
-
818
- if (user_mode(regs))
819
- return DBG_HOOK_ERROR;
820960
821961 kasan_report(addr, size, write, pc);
822962
@@ -842,13 +982,10 @@
 	return DBG_HOOK_HANDLED;
 }

-#define KASAN_ESR_VAL (0xf2000000 | KASAN_BRK_IMM)
-#define KASAN_ESR_MASK 0xffffff00
-
 static struct break_hook kasan_break_hook = {
-	.esr_val = KASAN_ESR_VAL,
-	.esr_mask = KASAN_ESR_MASK,
-	.fn = kasan_handler,
+	.fn = kasan_handler,
+	.imm = KASAN_BRK_IMM,
+	.mask = KASAN_BRK_MASK,
 };
 #endif

@@ -860,17 +997,20 @@
 			  struct pt_regs *regs)
 {
 #ifdef CONFIG_KASAN_SW_TAGS
-	if ((esr & KASAN_ESR_MASK) == KASAN_ESR_VAL)
+	unsigned int comment = esr & ESR_ELx_BRK64_ISS_COMMENT_MASK;
+
+	if ((comment & ~KASAN_BRK_MASK) == KASAN_BRK_IMM)
 		return kasan_handler(regs, esr) != DBG_HOOK_HANDLED;
 #endif
 	return bug_handler(regs, esr) != DBG_HOOK_HANDLED;
 }

-/* This registration must happen early, before debug_traps_init(). */
 void __init trap_init(void)
 {
-	register_break_hook(&bug_break_hook);
+	register_kernel_break_hook(&bug_break_hook);
+	register_kernel_break_hook(&fault_break_hook);
 #ifdef CONFIG_KASAN_SW_TAGS
-	register_break_hook(&kasan_break_hook);
+	register_kernel_break_hook(&kasan_break_hook);
 #endif
+	debug_traps_init();
 }