hc
2024-11-01 2f529f9b558ca1c1bd74be7437a84e4711743404
kernel/arch/arm64/kernel/traps.c
....@@ -15,6 +15,7 @@
1515 #include <linux/spinlock.h>
1616 #include <linux/uaccess.h>
1717 #include <linux/hardirq.h>
18
+#include <linux/irqstage.h>
1819 #include <linux/kdebug.h>
1920 #include <linux/module.h>
2021 #include <linux/kexec.h>
....@@ -117,7 +118,7 @@
117118 return ret;
118119 }
119120
120
-static DEFINE_RAW_SPINLOCK(die_lock);
121
+static DEFINE_HARD_SPINLOCK(die_lock);
121122
122123 /*
123124 * This function is protected against re-entrancy.
....@@ -292,7 +293,7 @@
292293 }
293294
294295 static LIST_HEAD(undef_hook);
295
-static DEFINE_RAW_SPINLOCK(undef_lock);
296
+static DEFINE_HARD_SPINLOCK(undef_lock);
296297
297298 void register_undef_hook(struct undef_hook *hook)
298299 {
....@@ -406,6 +407,13 @@
406407
407408 void do_undefinstr(struct pt_regs *regs)
408409 {
410
+ /*
411
+	 * If the companion core did not switch us to in-band
412
+ * context, we may assume that it has handled the trap.
413
+ */
414
+ if (running_oob())
415
+ return;
416
+
409417 /* check for AArch32 breakpoint instructions */
410418 if (!aarch32_break_handler(regs))
411419 return;
....@@ -415,14 +423,18 @@
415423
416424 trace_android_rvh_do_undefinstr(regs, user_mode(regs));
417425 BUG_ON(!user_mode(regs));
426
+ mark_trap_entry(ARM64_TRAP_UNDI, regs);
418427 force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
428
+ mark_trap_exit(ARM64_TRAP_UNDI, regs);
419429 }
420430 NOKPROBE_SYMBOL(do_undefinstr);
421431
422432 void do_bti(struct pt_regs *regs)
423433 {
424434 BUG_ON(!user_mode(regs));
435
+ mark_trap_entry(ARM64_TRAP_BTI, regs);
425436 force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
437
+ mark_trap_exit(ARM64_TRAP_BTI, regs);
426438 }
427439 NOKPROBE_SYMBOL(do_bti);
428440
....@@ -492,9 +504,11 @@
492504 return;
493505 }
494506
495
- if (ret)
496
- arm64_notify_segfault(tagged_address);
497
- else
507
+ if (ret) {
508
+ mark_trap_entry(ARM64_TRAP_ACCESS, regs);
509
+		arm64_notify_segfault(tagged_address);
510
+ mark_trap_exit(ARM64_TRAP_ACCESS, regs);
511
+ } else
498512 arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
499513 }
500514
....@@ -540,8 +554,11 @@
540554 rt = ESR_ELx_SYS64_ISS_RT(esr);
541555 sysreg = esr_sys64_to_sysreg(esr);
542556
543
- if (do_emulate_mrs(regs, sysreg, rt) != 0)
557
+ if (do_emulate_mrs(regs, sysreg, rt) != 0) {
558
+ mark_trap_entry(ARM64_TRAP_ACCESS, regs);
544559 force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
560
+ mark_trap_exit(ARM64_TRAP_ACCESS, regs);
561
+ }
545562 }
546563
547564 static void wfi_handler(unsigned int esr, struct pt_regs *regs)
....@@ -768,6 +785,11 @@
768785 */
769786 asmlinkage void notrace bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
770787 {
788
+ /*
789
+ * Dovetail: Same as __do_kernel_fault(), don't bother
790
+ * restoring the in-band stage, this trap is fatal and we are
791
+ * already walking on thin ice.
792
+ */
771793 arm64_enter_nmi(regs);
772794
773795 console_verbose();
....@@ -790,11 +812,13 @@
790812 {
791813 unsigned long pc = instruction_pointer(regs);
792814
815
+ mark_trap_entry(ARM64_TRAP_ACCESS, regs);
793816 current->thread.fault_address = 0;
794817 current->thread.fault_code = esr;
795818
796819 arm64_force_sig_fault(SIGILL, ILL_ILLOPC, pc,
797820 "Bad EL0 synchronous exception");
821
+ mark_trap_exit(ARM64_TRAP_ACCESS, regs);
798822 }
799823
800824 #ifdef CONFIG_VMAP_STACK