@@ -15,6 +15,7 @@
 #include <linux/spinlock.h>
 #include <linux/uaccess.h>
 #include <linux/hardirq.h>
+#include <linux/irqstage.h>
 #include <linux/kdebug.h>
 #include <linux/module.h>
 #include <linux/kexec.h>
@@ -117,7 +118,7 @@
 	return ret;
 }
 
-static DEFINE_RAW_SPINLOCK(die_lock);
+static DEFINE_HARD_SPINLOCK(die_lock);
 
 /*
  * This function is protected against re-entrancy.
@@ -292,7 +293,7 @@
 }
 
 static LIST_HEAD(undef_hook);
-static DEFINE_RAW_SPINLOCK(undef_lock);
+static DEFINE_HARD_SPINLOCK(undef_lock);
 
 void register_undef_hook(struct undef_hook *hook)
 {
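Note on the two lock conversions above: DEFINE_HARD_SPINLOCK() declares a Dovetail hard spinlock, which keeps interrupts hard-disabled while held so the protected data can also be reached safely from out-of-band trap handling. Below is a minimal sketch of such a lock in use, not part of the patch; sample_lock and sample_critical_section() are made up for illustration, and the assumption that the regular raw_spin_lock_* helpers operate on hard locks is inferred from the diff leaving the existing die_lock/undef_lock lock/unlock sites untouched.

/*
 * Illustrative sketch only: a hard lock declared like die_lock and
 * undef_lock above, taken the way a raw spinlock would be.
 */
#include <linux/spinlock.h>

static DEFINE_HARD_SPINLOCK(sample_lock);

static void sample_critical_section(void)
{
	unsigned long flags;

	/* Interrupts stay hard-disabled while the lock is held. */
	raw_spin_lock_irqsave(&sample_lock, flags);
	/* ... touch data shared with out-of-band context ... */
	raw_spin_unlock_irqrestore(&sample_lock, flags);
}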
@@ -406,6 +407,13 @@
 
 void do_undefinstr(struct pt_regs *regs)
 {
+	/*
+	 * If the companion core did not switch us to in-band
+	 * context, we may assume that it has handled the trap.
+	 */
+	if (running_oob())
+		return;
+
 	/* check for AArch32 breakpoint instructions */
 	if (!aarch32_break_handler(regs))
 		return;
@@ -415,14 +423,18 @@
 
 	trace_android_rvh_do_undefinstr(regs, user_mode(regs));
 	BUG_ON(!user_mode(regs));
+	mark_trap_entry(ARM64_TRAP_UNDI, regs);
 	force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
+	mark_trap_exit(ARM64_TRAP_UNDI, regs);
 }
 NOKPROBE_SYMBOL(do_undefinstr);
 
 void do_bti(struct pt_regs *regs)
 {
 	BUG_ON(!user_mode(regs));
+	mark_trap_entry(ARM64_TRAP_BTI, regs);
 	force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
+	mark_trap_exit(ARM64_TRAP_BTI, regs);
 }
 NOKPROBE_SYMBOL(do_bti);
 
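The same bracketing recurs in every handler touched below, so a condensed sketch of the pattern may help. sample_trap_handler() is hypothetical; running_oob(), mark_trap_entry()/mark_trap_exit(), ARM64_TRAP_UNDI and force_signal_inject() are the symbols used by the patch itself, and the comments state assumed Dovetail semantics rather than guaranteed behavior.

/*
 * Condensed form of the pattern this patch applies to the arm64
 * trap handlers (illustration only).
 */
#include <linux/irqstage.h>

static void sample_trap_handler(struct pt_regs *regs)
{
	/*
	 * Still on the out-of-band stage: the companion core is
	 * assumed to have dealt with the trap, nothing to do here.
	 */
	if (running_oob())
		return;

	/*
	 * In-band handling may inject a signal; the entry/exit
	 * markers are assumed to let a companion core move the
	 * current task in-band around that path.
	 */
	mark_trap_entry(ARM64_TRAP_UNDI, regs);
	force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
	mark_trap_exit(ARM64_TRAP_UNDI, regs);
}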
@@ -492,9 +504,11 @@
 		return;
 	}
 
-	if (ret)
-		arm64_notify_segfault(tagged_address);
-	else
+	if (ret) {
+		mark_trap_entry(ARM64_TRAP_ACCESS, regs);
+		arm64_notify_segfault(address);
+		mark_trap_exit(ARM64_TRAP_ACCESS, regs);
+	} else
 		arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
 }
 
@@ -540,8 +554,11 @@
 	rt = ESR_ELx_SYS64_ISS_RT(esr);
 	sysreg = esr_sys64_to_sysreg(esr);
 
-	if (do_emulate_mrs(regs, sysreg, rt) != 0)
+	if (do_emulate_mrs(regs, sysreg, rt) != 0) {
+		mark_trap_entry(ARM64_TRAP_ACCESS, regs);
 		force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
+		mark_trap_exit(ARM64_TRAP_ACCESS, regs);
+	}
 }
 
 static void wfi_handler(unsigned int esr, struct pt_regs *regs)
@@ -768,6 +785,11 @@
  */
asmlinkage void notrace bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
 {
+	/*
+	 * Dovetail: Same as __do_kernel_fault(), don't bother
+	 * restoring the in-band stage, this trap is fatal and we are
+	 * already walking on thin ice.
+	 */
 	arm64_enter_nmi(regs);
 
 	console_verbose();
@@ -790,11 +812,13 @@
 {
 	unsigned long pc = instruction_pointer(regs);
 
+	mark_trap_entry(ARM64_TRAP_ACCESS, regs);
 	current->thread.fault_address = 0;
 	current->thread.fault_code = esr;
 
 	arm64_force_sig_fault(SIGILL, ILL_ILLOPC, pc,
 			      "Bad EL0 synchronous exception");
+	mark_trap_exit(ARM64_TRAP_ACCESS, regs);
 }
 
 #ifdef CONFIG_VMAP_STACK
---|