.. | .. |
---|
| 1 | +// SPDX-License-Identifier: GPL-2.0-or-later |
---|
1 | 2 | /* |
---|
2 | 3 | * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) |
---|
3 | 4 | * Copyright 2007-2010 Freescale Semiconductor, Inc. |
---|
4 | | - * |
---|
5 | | - * This program is free software; you can redistribute it and/or |
---|
6 | | - * modify it under the terms of the GNU General Public License |
---|
7 | | - * as published by the Free Software Foundation; either version |
---|
8 | | - * 2 of the License, or (at your option) any later version. |
---|
9 | 5 | * |
---|
10 | 6 | * Modified by Cort Dougan (cort@cs.nmt.edu) |
---|
11 | 7 | * and Paul Mackerras (paulus@samba.org) |
---|
.. | .. |
---|
43 | 39 | #include <linux/kmsg_dump.h> |
---|
44 | 40 | |
---|
45 | 41 | #include <asm/emulated_ops.h> |
---|
46 | | -#include <asm/pgtable.h> |
---|
47 | 42 | #include <linux/uaccess.h> |
---|
48 | 43 | #include <asm/debugfs.h> |
---|
49 | 44 | #include <asm/io.h> |
---|
.. | .. |
---|
71 | 66 | #include <sysdev/fsl_pci.h> |
---|
72 | 67 | #include <asm/kprobes.h> |
---|
73 | 68 | #include <asm/stacktrace.h> |
---|
| 69 | +#include <asm/nmi.h> |
---|
74 | 70 | |
---|
75 | 71 | #if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC_CORE) |
---|
76 | 72 | int (*__debugger)(struct pt_regs *regs) __read_mostly; |
---|
.. | .. |
---|
174 | 170 | |
---|
175 | 171 | extern void panic_flush_kmsg_end(void) |
---|
176 | 172 | { |
---|
177 | | - printk_safe_flush_on_panic(); |
---|
178 | 173 | kmsg_dump(KMSG_DUMP_PANIC); |
---|
179 | 174 | bust_spinlocks(0); |
---|
180 | 175 | debug_locks_off(); |
---|
181 | | - console_flush_on_panic(); |
---|
| 176 | + console_flush_on_panic(CONSOLE_FLUSH_PENDING); |
---|
182 | 177 | } |
---|
183 | 178 | |
---|
184 | 179 | static unsigned long oops_begin(struct pt_regs *regs) |
---|
.. | .. |
---|
247 | 242 | mdelay(MSEC_PER_SEC); |
---|
248 | 243 | } |
---|
249 | 244 | |
---|
250 | | - if (in_interrupt()) |
---|
251 | | - panic("Fatal exception in interrupt"); |
---|
252 | 245 | if (panic_on_oops) |
---|
253 | 246 | panic("Fatal exception"); |
---|
254 | 247 | do_exit(signr); |
---|
255 | 248 | } |
---|
256 | 249 | NOKPROBE_SYMBOL(oops_end); |
---|
257 | 250 | |
---|
| 251 | +static char *get_mmu_str(void) |
---|
| 252 | +{ |
---|
| 253 | + if (early_radix_enabled()) |
---|
| 254 | + return " MMU=Radix"; |
---|
| 255 | + if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE)) |
---|
| 256 | + return " MMU=Hash"; |
---|
| 257 | + return ""; |
---|
| 258 | +} |
---|
| 259 | + |
---|
258 | 260 | static int __die(const char *str, struct pt_regs *regs, long err) |
---|
259 | 261 | { |
---|
| 262 | + const char *pr = ""; |
---|
| 263 | + |
---|
260 | 264 | printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter); |
---|
261 | 265 | |
---|
262 | | - if (IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN)) |
---|
263 | | - printk("LE "); |
---|
264 | | - else |
---|
265 | | - printk("BE "); |
---|
| 266 | + if (IS_ENABLED(CONFIG_PREEMPTION)) |
---|
| 267 | + pr = IS_ENABLED(CONFIG_PREEMPT_RT) ? " PREEMPT_RT" : " PREEMPT"; |
---|
266 | 268 | |
---|
267 | | - if (IS_ENABLED(CONFIG_PREEMPT)) |
---|
268 | | - pr_cont("PREEMPT "); |
---|
269 | | - |
---|
270 | | - if (IS_ENABLED(CONFIG_SMP)) |
---|
271 | | - pr_cont("SMP NR_CPUS=%d ", NR_CPUS); |
---|
272 | | - |
---|
273 | | - if (debug_pagealloc_enabled()) |
---|
274 | | - pr_cont("DEBUG_PAGEALLOC "); |
---|
275 | | - |
---|
276 | | - if (IS_ENABLED(CONFIG_NUMA)) |
---|
277 | | - pr_cont("NUMA "); |
---|
278 | | - |
---|
279 | | - pr_cont("%s\n", ppc_md.name ? ppc_md.name : ""); |
---|
| 269 | + printk("%s PAGE_SIZE=%luK%s%s%s%s%s%s %s\n", |
---|
| 270 | + IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN) ? "LE" : "BE", |
---|
| 271 | + PAGE_SIZE / 1024, get_mmu_str(), |
---|
| 272 | + pr, |
---|
| 273 | + IS_ENABLED(CONFIG_SMP) ? " SMP" : "", |
---|
| 274 | + IS_ENABLED(CONFIG_SMP) ? (" NR_CPUS=" __stringify(NR_CPUS)) : "", |
---|
| 275 | + debug_pagealloc_enabled() ? " DEBUG_PAGEALLOC" : "", |
---|
| 276 | + IS_ENABLED(CONFIG_NUMA) ? " NUMA" : "", |
---|
| 277 | + ppc_md.name ? ppc_md.name : ""); |
---|
280 | 278 | |
---|
281 | 279 | if (notify_die(DIE_OOPS, str, regs, err, 255, SIGSEGV) == NOTIFY_STOP) |
---|
282 | 280 | return 1; |
---|
.. | .. |
---|
307 | 305 | } |
---|
308 | 306 | NOKPROBE_SYMBOL(die); |
---|
309 | 307 | |
---|
310 | | -void user_single_step_siginfo(struct task_struct *tsk, |
---|
311 | | - struct pt_regs *regs, siginfo_t *info) |
---|
| 308 | +void user_single_step_report(struct pt_regs *regs) |
---|
312 | 309 | { |
---|
313 | | - info->si_signo = SIGTRAP; |
---|
314 | | - info->si_code = TRAP_TRACE; |
---|
315 | | - info->si_addr = (void __user *)regs->nip; |
---|
| 310 | + force_sig_fault(SIGTRAP, TRAP_TRACE, (void __user *)regs->nip); |
---|
316 | 311 | } |
---|
317 | 312 | |
---|
318 | 313 | static void show_signal_msg(int signr, struct pt_regs *regs, int code, |
---|
.. | .. |
---|
341 | 336 | show_user_instructions(regs); |
---|
342 | 337 | } |
---|
343 | 338 | |
---|
344 | | -void _exception_pkey(int signr, struct pt_regs *regs, int code, |
---|
345 | | - unsigned long addr, int key) |
---|
| 339 | +static bool exception_common(int signr, struct pt_regs *regs, int code, |
---|
| 340 | + unsigned long addr) |
---|
346 | 341 | { |
---|
347 | | - siginfo_t info; |
---|
348 | | - |
---|
349 | 342 | if (!user_mode(regs)) { |
---|
350 | 343 | die("Exception in kernel mode", regs, signr); |
---|
351 | | - return; |
---|
| 344 | + return false; |
---|
352 | 345 | } |
---|
353 | 346 | |
---|
354 | 347 | show_signal_msg(signr, regs, code, addr); |
---|
.. | .. |
---|
364 | 357 | */ |
---|
365 | 358 | thread_pkey_regs_save(¤t->thread); |
---|
366 | 359 | |
---|
367 | | - clear_siginfo(&info); |
---|
368 | | - info.si_signo = signr; |
---|
369 | | - info.si_code = code; |
---|
370 | | - info.si_addr = (void __user *) addr; |
---|
371 | | - info.si_pkey = key; |
---|
| 360 | + return true; |
---|
| 361 | +} |
---|
372 | 362 | |
---|
373 | | - force_sig_info(signr, &info, current); |
---|
| 363 | +void _exception_pkey(struct pt_regs *regs, unsigned long addr, int key) |
---|
| 364 | +{ |
---|
| 365 | + if (!exception_common(SIGSEGV, regs, SEGV_PKUERR, addr)) |
---|
| 366 | + return; |
---|
| 367 | + |
---|
| 368 | + force_sig_pkuerr((void __user *) addr, key); |
---|
374 | 369 | } |
---|
375 | 370 | |
---|
376 | 371 | void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr) |
---|
377 | 372 | { |
---|
378 | | - _exception_pkey(signr, regs, code, addr, 0); |
---|
| 373 | + if (!exception_common(signr, regs, code, addr)) |
---|
| 374 | + return; |
---|
| 375 | + |
---|
| 376 | + force_sig_fault(signr, code, (void __user *)addr); |
---|
| 377 | +} |
---|
| 378 | + |
---|
| 379 | +/* |
---|
| 380 | + * The interrupt architecture has a quirk in that the HV interrupts excluding |
---|
| 381 | + * the NMIs (0x100 and 0x200) do not clear MSR[RI] at entry. The first thing |
---|
| 382 | + * that an interrupt handler must do is save off a GPR into a scratch register, |
---|
| 383 | + * and all interrupts on POWERNV (HV=1) use the HSPRG1 register as scratch. |
---|
| 384 | + * Therefore an NMI can clobber an HV interrupt's live HSPRG1 without noticing |
---|
| 385 | + * that it is non-reentrant, which leads to random data corruption. |
---|
| 386 | + * |
---|
| 387 | + * The solution is for NMI interrupts in HV mode to check if they originated |
---|
| 388 | + * from these critical HV interrupt regions. If so, then mark them not |
---|
| 389 | + * recoverable. |
---|
| 390 | + * |
---|
| 391 | + * An alternative would be for HV NMIs to use SPRG for scratch to avoid the |
---|
| 392 | + * HSPRG1 clobber, however this would cause guest SPRG to be clobbered. Linux |
---|
| 393 | + * guests should always have MSR[RI]=0 when their scratch SPRG is in use, so |
---|
| 394 | + * that would work. However any other guest OS that may have the SPRG live |
---|
| 395 | + * and MSR[RI]=1 could encounter silent corruption. |
---|
| 396 | + * |
---|
| 397 | + * Builds that do not support KVM could take this second option to increase |
---|
| 398 | + * the recoverability of NMIs. |
---|
| 399 | + */ |
---|
| 400 | +void hv_nmi_check_nonrecoverable(struct pt_regs *regs) |
---|
| 401 | +{ |
---|
| 402 | +#ifdef CONFIG_PPC_POWERNV |
---|
| 403 | + unsigned long kbase = (unsigned long)_stext; |
---|
| 404 | + unsigned long nip = regs->nip; |
---|
| 405 | + |
---|
| 406 | + if (!(regs->msr & MSR_RI)) |
---|
| 407 | + return; |
---|
| 408 | + if (!(regs->msr & MSR_HV)) |
---|
| 409 | + return; |
---|
| 410 | + if (regs->msr & MSR_PR) |
---|
| 411 | + return; |
---|
| 412 | + |
---|
| 413 | + /* |
---|
| 414 | + * Now test if the interrupt has hit a range that may be using |
---|
| 415 | + * HSPRG1 without having RI=0 (i.e., an HSRR interrupt). The |
---|
| 416 | + * problem ranges all run un-relocated. Test real and virt modes |
---|
| 417 | + * at the same time by dropping the high bit of the nip (virt mode |
---|
| 418 | + * entry points still have the +0x4000 offset). |
---|
| 419 | + */ |
---|
| 420 | + nip &= ~0xc000000000000000ULL; |
---|
| 421 | + if ((nip >= 0x500 && nip < 0x600) || (nip >= 0x4500 && nip < 0x4600)) |
---|
| 422 | + goto nonrecoverable; |
---|
| 423 | + if ((nip >= 0x980 && nip < 0xa00) || (nip >= 0x4980 && nip < 0x4a00)) |
---|
| 424 | + goto nonrecoverable; |
---|
| 425 | + if ((nip >= 0xe00 && nip < 0xec0) || (nip >= 0x4e00 && nip < 0x4ec0)) |
---|
| 426 | + goto nonrecoverable; |
---|
| 427 | + if ((nip >= 0xf80 && nip < 0xfa0) || (nip >= 0x4f80 && nip < 0x4fa0)) |
---|
| 428 | + goto nonrecoverable; |
---|
| 429 | + |
---|
| 430 | + /* Trampoline code runs un-relocated so subtract kbase. */ |
---|
| 431 | + if (nip >= (unsigned long)(start_real_trampolines - kbase) && |
---|
| 432 | + nip < (unsigned long)(end_real_trampolines - kbase)) |
---|
| 433 | + goto nonrecoverable; |
---|
| 434 | + if (nip >= (unsigned long)(start_virt_trampolines - kbase) && |
---|
| 435 | + nip < (unsigned long)(end_virt_trampolines - kbase)) |
---|
| 436 | + goto nonrecoverable; |
---|
| 437 | + return; |
---|
| 438 | + |
---|
| 439 | +nonrecoverable: |
---|
| 440 | + regs->msr &= ~MSR_RI; |
---|
| 441 | +#endif |
---|
379 | 442 | } |
---|
380 | 443 | |
---|
381 | 444 | void system_reset_exception(struct pt_regs *regs) |
---|
382 | 445 | { |
---|
| 446 | + unsigned long hsrr0, hsrr1; |
---|
| 447 | + bool saved_hsrrs = false; |
---|
| 448 | + u8 ftrace_enabled = this_cpu_get_ftrace_enabled(); |
---|
| 449 | + |
---|
| 450 | + this_cpu_set_ftrace_enabled(0); |
---|
| 451 | + |
---|
| 452 | + nmi_enter(); |
---|
| 453 | + |
---|
383 | 454 | /* |
---|
384 | | - * Avoid crashes in case of nested NMI exceptions. Recoverability |
---|
385 | | - * is determined by RI and in_nmi |
---|
| 455 | + * System reset can interrupt code where HSRRs are live and MSR[RI]=1. |
---|
| 456 | + * The system reset interrupt itself may clobber HSRRs (e.g., to call |
---|
| 457 | + * OPAL), so save them here and restore them before returning. |
---|
| 458 | + * |
---|
| 459 | + * Machine checks don't need to save HSRRs, as the real mode handler |
---|
| 460 | + * is careful to avoid them, and the regular handler is not delivered |
---|
| 461 | + * as an NMI. |
---|
386 | 462 | */ |
---|
387 | | - bool nested = in_nmi(); |
---|
388 | | - if (!nested) |
---|
389 | | - nmi_enter(); |
---|
| 463 | + if (cpu_has_feature(CPU_FTR_HVMODE)) { |
---|
| 464 | + hsrr0 = mfspr(SPRN_HSRR0); |
---|
| 465 | + hsrr1 = mfspr(SPRN_HSRR1); |
---|
| 466 | + saved_hsrrs = true; |
---|
| 467 | + } |
---|
| 468 | + |
---|
| 469 | + hv_nmi_check_nonrecoverable(regs); |
---|
390 | 470 | |
---|
391 | 471 | __this_cpu_inc(irq_stat.sreset_irqs); |
---|
392 | 472 | |
---|
.. | .. |
---|
439 | 519 | die("Unrecoverable System Reset", regs, SIGABRT); |
---|
440 | 520 | } |
---|
441 | 521 | |
---|
442 | | - if (!nested) |
---|
443 | | - nmi_exit(); |
---|
| 522 | + if (saved_hsrrs) { |
---|
| 523 | + mtspr(SPRN_HSRR0, hsrr0); |
---|
| 524 | + mtspr(SPRN_HSRR1, hsrr1); |
---|
| 525 | + } |
---|
| 526 | + |
---|
| 527 | + nmi_exit(); |
---|
| 528 | + |
---|
| 529 | + this_cpu_set_ftrace_enabled(ftrace_enabled); |
---|
444 | 530 | |
---|
445 | 531 | /* What should we do here? We could issue a shutdown or hard reset. */ |
---|
446 | 532 | } |
---|
.. | .. |
---|
450 | 536 | * Check if the NIP corresponds to the address of a sync |
---|
451 | 537 | * instruction for which there is an entry in the exception |
---|
452 | 538 | * table. |
---|
453 | | - * Note that the 601 only takes a machine check on TEA |
---|
454 | | - * (transfer error ack) signal assertion, and does not |
---|
455 | | - * set any of the top 16 bits of SRR1. |
---|
456 | 539 | * -- paulus. |
---|
457 | 540 | */ |
---|
458 | 541 | static inline int check_io_access(struct pt_regs *regs) |
---|
.. | .. |
---|
501 | 584 | #define REASON_ILLEGAL (ESR_PIL | ESR_PUO) |
---|
502 | 585 | #define REASON_PRIVILEGED ESR_PPR |
---|
503 | 586 | #define REASON_TRAP ESR_PTR |
---|
| 587 | +#define REASON_PREFIXED 0 |
---|
| 588 | +#define REASON_BOUNDARY 0 |
---|
504 | 589 | |
---|
505 | 590 | /* single-step stuff */ |
---|
506 | 591 | #define single_stepping(regs) (current->thread.debug.dbcr0 & DBCR0_IC) |
---|
.. | .. |
---|
515 | 600 | #define REASON_ILLEGAL SRR1_PROGILL |
---|
516 | 601 | #define REASON_PRIVILEGED SRR1_PROGPRIV |
---|
517 | 602 | #define REASON_TRAP SRR1_PROGTRAP |
---|
| 603 | +#define REASON_PREFIXED SRR1_PREFIXED |
---|
| 604 | +#define REASON_BOUNDARY SRR1_BOUNDARY |
---|
518 | 605 | |
---|
519 | 606 | #define single_stepping(regs) ((regs)->msr & MSR_SE) |
---|
520 | 607 | #define clear_single_step(regs) ((regs)->msr &= ~MSR_SE) |
---|
521 | 608 | #define clear_br_trace(regs) ((regs)->msr &= ~MSR_BE) |
---|
522 | 609 | #endif |
---|
| 610 | + |
---|
| 611 | +#define inst_length(reason) (((reason) & REASON_PREFIXED) ? 8 : 4) |
---|
523 | 612 | |
---|
524 | 613 | #if defined(CONFIG_E500) |
---|
525 | 614 | int machine_check_e500mc(struct pt_regs *regs) |
---|
.. | .. |
---|
539 | 628 | printk("Caused by (from MCSR=%lx): ", reason); |
---|
540 | 629 | |
---|
541 | 630 | if (reason & MCSR_MCP) |
---|
542 | | - printk("Machine Check Signal\n"); |
---|
| 631 | + pr_cont("Machine Check Signal\n"); |
---|
543 | 632 | |
---|
544 | 633 | if (reason & MCSR_ICPERR) { |
---|
545 | | - printk("Instruction Cache Parity Error\n"); |
---|
| 634 | + pr_cont("Instruction Cache Parity Error\n"); |
---|
546 | 635 | |
---|
547 | 636 | /* |
---|
548 | 637 | * This is recoverable by invalidating the i-cache. |
---|
.. | .. |
---|
560 | 649 | } |
---|
561 | 650 | |
---|
562 | 651 | if (reason & MCSR_DCPERR_MC) { |
---|
563 | | - printk("Data Cache Parity Error\n"); |
---|
| 652 | + pr_cont("Data Cache Parity Error\n"); |
---|
564 | 653 | |
---|
565 | 654 | /* |
---|
566 | 655 | * In write shadow mode we auto-recover from the error, but it |
---|
.. | .. |
---|
579 | 668 | } |
---|
580 | 669 | |
---|
581 | 670 | if (reason & MCSR_L2MMU_MHIT) { |
---|
582 | | - printk("Hit on multiple TLB entries\n"); |
---|
| 671 | + pr_cont("Hit on multiple TLB entries\n"); |
---|
583 | 672 | recoverable = 0; |
---|
584 | 673 | } |
---|
585 | 674 | |
---|
586 | 675 | if (reason & MCSR_NMI) |
---|
587 | | - printk("Non-maskable interrupt\n"); |
---|
| 676 | + pr_cont("Non-maskable interrupt\n"); |
---|
588 | 677 | |
---|
589 | 678 | if (reason & MCSR_IF) { |
---|
590 | | - printk("Instruction Fetch Error Report\n"); |
---|
| 679 | + pr_cont("Instruction Fetch Error Report\n"); |
---|
591 | 680 | recoverable = 0; |
---|
592 | 681 | } |
---|
593 | 682 | |
---|
594 | 683 | if (reason & MCSR_LD) { |
---|
595 | | - printk("Load Error Report\n"); |
---|
| 684 | + pr_cont("Load Error Report\n"); |
---|
596 | 685 | recoverable = 0; |
---|
597 | 686 | } |
---|
598 | 687 | |
---|
599 | 688 | if (reason & MCSR_ST) { |
---|
600 | | - printk("Store Error Report\n"); |
---|
| 689 | + pr_cont("Store Error Report\n"); |
---|
601 | 690 | recoverable = 0; |
---|
602 | 691 | } |
---|
603 | 692 | |
---|
604 | 693 | if (reason & MCSR_LDG) { |
---|
605 | | - printk("Guarded Load Error Report\n"); |
---|
| 694 | + pr_cont("Guarded Load Error Report\n"); |
---|
606 | 695 | recoverable = 0; |
---|
607 | 696 | } |
---|
608 | 697 | |
---|
609 | 698 | if (reason & MCSR_TLBSYNC) |
---|
610 | | - printk("Simultaneous tlbsync operations\n"); |
---|
| 699 | + pr_cont("Simultaneous tlbsync operations\n"); |
---|
611 | 700 | |
---|
612 | 701 | if (reason & MCSR_BSL2_ERR) { |
---|
613 | | - printk("Level 2 Cache Error\n"); |
---|
| 702 | + pr_cont("Level 2 Cache Error\n"); |
---|
614 | 703 | recoverable = 0; |
---|
615 | 704 | } |
---|
616 | 705 | |
---|
.. | .. |
---|
620 | 709 | addr = mfspr(SPRN_MCAR); |
---|
621 | 710 | addr |= (u64)mfspr(SPRN_MCARU) << 32; |
---|
622 | 711 | |
---|
623 | | - printk("Machine Check %s Address: %#llx\n", |
---|
| 712 | + pr_cont("Machine Check %s Address: %#llx\n", |
---|
624 | 713 | reason & MCSR_MEA ? "Effective" : "Physical", addr); |
---|
625 | 714 | } |
---|
626 | 715 | |
---|
.. | .. |
---|
644 | 733 | printk("Caused by (from MCSR=%lx): ", reason); |
---|
645 | 734 | |
---|
646 | 735 | if (reason & MCSR_MCP) |
---|
647 | | - printk("Machine Check Signal\n"); |
---|
| 736 | + pr_cont("Machine Check Signal\n"); |
---|
648 | 737 | if (reason & MCSR_ICPERR) |
---|
649 | | - printk("Instruction Cache Parity Error\n"); |
---|
| 738 | + pr_cont("Instruction Cache Parity Error\n"); |
---|
650 | 739 | if (reason & MCSR_DCP_PERR) |
---|
651 | | - printk("Data Cache Push Parity Error\n"); |
---|
| 740 | + pr_cont("Data Cache Push Parity Error\n"); |
---|
652 | 741 | if (reason & MCSR_DCPERR) |
---|
653 | | - printk("Data Cache Parity Error\n"); |
---|
| 742 | + pr_cont("Data Cache Parity Error\n"); |
---|
654 | 743 | if (reason & MCSR_BUS_IAERR) |
---|
655 | | - printk("Bus - Instruction Address Error\n"); |
---|
| 744 | + pr_cont("Bus - Instruction Address Error\n"); |
---|
656 | 745 | if (reason & MCSR_BUS_RAERR) |
---|
657 | | - printk("Bus - Read Address Error\n"); |
---|
| 746 | + pr_cont("Bus - Read Address Error\n"); |
---|
658 | 747 | if (reason & MCSR_BUS_WAERR) |
---|
659 | | - printk("Bus - Write Address Error\n"); |
---|
| 748 | + pr_cont("Bus - Write Address Error\n"); |
---|
660 | 749 | if (reason & MCSR_BUS_IBERR) |
---|
661 | | - printk("Bus - Instruction Data Error\n"); |
---|
| 750 | + pr_cont("Bus - Instruction Data Error\n"); |
---|
662 | 751 | if (reason & MCSR_BUS_RBERR) |
---|
663 | | - printk("Bus - Read Data Bus Error\n"); |
---|
| 752 | + pr_cont("Bus - Read Data Bus Error\n"); |
---|
664 | 753 | if (reason & MCSR_BUS_WBERR) |
---|
665 | | - printk("Bus - Write Data Bus Error\n"); |
---|
| 754 | + pr_cont("Bus - Write Data Bus Error\n"); |
---|
666 | 755 | if (reason & MCSR_BUS_IPERR) |
---|
667 | | - printk("Bus - Instruction Parity Error\n"); |
---|
| 756 | + pr_cont("Bus - Instruction Parity Error\n"); |
---|
668 | 757 | if (reason & MCSR_BUS_RPERR) |
---|
669 | | - printk("Bus - Read Parity Error\n"); |
---|
| 758 | + pr_cont("Bus - Read Parity Error\n"); |
---|
670 | 759 | |
---|
671 | 760 | return 0; |
---|
672 | 761 | } |
---|
.. | .. |
---|
684 | 773 | printk("Caused by (from MCSR=%lx): ", reason); |
---|
685 | 774 | |
---|
686 | 775 | if (reason & MCSR_MCP) |
---|
687 | | - printk("Machine Check Signal\n"); |
---|
| 776 | + pr_cont("Machine Check Signal\n"); |
---|
688 | 777 | if (reason & MCSR_CP_PERR) |
---|
689 | | - printk("Cache Push Parity Error\n"); |
---|
| 778 | + pr_cont("Cache Push Parity Error\n"); |
---|
690 | 779 | if (reason & MCSR_CPERR) |
---|
691 | | - printk("Cache Parity Error\n"); |
---|
| 780 | + pr_cont("Cache Parity Error\n"); |
---|
692 | 781 | if (reason & MCSR_EXCP_ERR) |
---|
693 | | - printk("ISI, ITLB, or Bus Error on first instruction fetch for an exception handler\n"); |
---|
| 782 | + pr_cont("ISI, ITLB, or Bus Error on first instruction fetch for an exception handler\n"); |
---|
694 | 783 | if (reason & MCSR_BUS_IRERR) |
---|
695 | | - printk("Bus - Read Bus Error on instruction fetch\n"); |
---|
| 784 | + pr_cont("Bus - Read Bus Error on instruction fetch\n"); |
---|
696 | 785 | if (reason & MCSR_BUS_DRERR) |
---|
697 | | - printk("Bus - Read Bus Error on data load\n"); |
---|
| 786 | + pr_cont("Bus - Read Bus Error on data load\n"); |
---|
698 | 787 | if (reason & MCSR_BUS_WRERR) |
---|
699 | | - printk("Bus - Write Bus Error on buffered store or cache line push\n"); |
---|
| 788 | + pr_cont("Bus - Write Bus Error on buffered store or cache line push\n"); |
---|
700 | 789 | |
---|
701 | 790 | return 0; |
---|
702 | 791 | } |
---|
.. | .. |
---|
709 | 798 | printk("Caused by (from SRR1=%lx): ", reason); |
---|
710 | 799 | switch (reason & 0x601F0000) { |
---|
711 | 800 | case 0x80000: |
---|
712 | | - printk("Machine check signal\n"); |
---|
| 801 | + pr_cont("Machine check signal\n"); |
---|
713 | 802 | break; |
---|
714 | | - case 0: /* for 601 */ |
---|
715 | 803 | case 0x40000: |
---|
716 | 804 | case 0x140000: /* 7450 MSS error and TEA */ |
---|
717 | | - printk("Transfer error ack signal\n"); |
---|
| 805 | + pr_cont("Transfer error ack signal\n"); |
---|
718 | 806 | break; |
---|
719 | 807 | case 0x20000: |
---|
720 | | - printk("Data parity error signal\n"); |
---|
| 808 | + pr_cont("Data parity error signal\n"); |
---|
721 | 809 | break; |
---|
722 | 810 | case 0x10000: |
---|
723 | | - printk("Address parity error signal\n"); |
---|
| 811 | + pr_cont("Address parity error signal\n"); |
---|
724 | 812 | break; |
---|
725 | 813 | case 0x20000000: |
---|
726 | | - printk("L1 Data Cache error\n"); |
---|
| 814 | + pr_cont("L1 Data Cache error\n"); |
---|
727 | 815 | break; |
---|
728 | 816 | case 0x40000000: |
---|
729 | | - printk("L1 Instruction Cache error\n"); |
---|
| 817 | + pr_cont("L1 Instruction Cache error\n"); |
---|
730 | 818 | break; |
---|
731 | 819 | case 0x00100000: |
---|
732 | | - printk("L2 data cache parity error\n"); |
---|
| 820 | + pr_cont("L2 data cache parity error\n"); |
---|
733 | 821 | break; |
---|
734 | 822 | default: |
---|
735 | | - printk("Unknown values in msr\n"); |
---|
| 823 | + pr_cont("Unknown values in msr\n"); |
---|
736 | 824 | } |
---|
737 | 825 | return 0; |
---|
738 | 826 | } |
---|
.. | .. |
---|
741 | 829 | void machine_check_exception(struct pt_regs *regs) |
---|
742 | 830 | { |
---|
743 | 831 | int recover = 0; |
---|
744 | | - bool nested = in_nmi(); |
---|
745 | | - if (!nested) |
---|
746 | | - nmi_enter(); |
---|
747 | 832 | |
---|
748 | | - /* 64s accounts the mce in machine_check_early when in HVMODE */ |
---|
749 | | - if (!IS_ENABLED(CONFIG_PPC_BOOK3S_64) || !cpu_has_feature(CPU_FTR_HVMODE)) |
---|
750 | | - __this_cpu_inc(irq_stat.mce_exceptions); |
---|
| 833 | + /* |
---|
| 834 | + * BOOK3S_64 does not call this handler as a non-maskable interrupt |
---|
| 835 | + * (it uses its own early real-mode handler to handle the MCE proper |
---|
| 836 | + * and then raises irq_work to call this handler when interrupts are |
---|
| 837 | + * enabled). |
---|
| 838 | + * |
---|
| 839 | + * This is silly. The BOOK3S_64 should just call a different function |
---|
| 840 | + * rather than expecting semantics to magically change. Something |
---|
| 841 | + * like 'non_nmi_machine_check_exception()', perhaps? |
---|
| 842 | + */ |
---|
| 843 | + const bool nmi = !IS_ENABLED(CONFIG_PPC_BOOK3S_64); |
---|
| 844 | + |
---|
| 845 | + if (nmi) nmi_enter(); |
---|
| 846 | + |
---|
| 847 | + __this_cpu_inc(irq_stat.mce_exceptions); |
---|
751 | 848 | |
---|
752 | 849 | add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE); |
---|
753 | 850 | |
---|
.. | .. |
---|
771 | 868 | if (check_io_access(regs)) |
---|
772 | 869 | goto bail; |
---|
773 | 870 | |
---|
774 | | - if (!nested) |
---|
775 | | - nmi_exit(); |
---|
| 871 | + if (nmi) nmi_exit(); |
---|
776 | 872 | |
---|
777 | 873 | die("Machine check", regs, SIGBUS); |
---|
778 | 874 | |
---|
.. | .. |
---|
783 | 879 | return; |
---|
784 | 880 | |
---|
785 | 881 | bail: |
---|
786 | | - if (!nested) |
---|
787 | | - nmi_exit(); |
---|
| 882 | + if (nmi) nmi_exit(); |
---|
788 | 883 | } |
---|
789 | 884 | |
---|
790 | 885 | void SMIException(struct pt_regs *regs) |
---|
.. | .. |
---|
845 | 940 | addr = (__force const void __user *)ea; |
---|
846 | 941 | |
---|
847 | 942 | /* Check it */ |
---|
848 | | - if (!access_ok(VERIFY_READ, addr, 16)) { |
---|
| 943 | + if (!access_ok(addr, 16)) { |
---|
849 | 944 | pr_devel("HMI vec emu: bad access %i:%s[%d] nip=%016lx" |
---|
850 | 945 | " instr=%08x addr=%016lx\n", |
---|
851 | 946 | smp_processor_id(), current->comm, current->pid, |
---|
.. | .. |
---|
1442 | 1537 | goto bail; |
---|
1443 | 1538 | } else { |
---|
1444 | 1539 | printk(KERN_EMERG "Unexpected TM Bad Thing exception " |
---|
1445 | | - "at %lx (msr 0x%x)\n", regs->nip, reason); |
---|
| 1540 | + "at %lx (msr 0x%lx) tm_scratch=%llx\n", |
---|
| 1541 | + regs->nip, regs->msr, get_paca()->tm_scratch); |
---|
1446 | 1542 | die("Unrecoverable exception", regs, SIGABRT); |
---|
1447 | 1543 | } |
---|
1448 | 1544 | } |
---|
.. | .. |
---|
1512 | 1608 | { |
---|
1513 | 1609 | enum ctx_state prev_state = exception_enter(); |
---|
1514 | 1610 | int sig, code, fixed = 0; |
---|
| 1611 | + unsigned long reason; |
---|
1515 | 1612 | |
---|
1516 | 1613 | /* We restore the interrupt state now */ |
---|
1517 | 1614 | if (!arch_irq_disabled_regs(regs)) |
---|
1518 | 1615 | local_irq_enable(); |
---|
| 1616 | + |
---|
| 1617 | + reason = get_reason(regs); |
---|
| 1618 | + |
---|
| 1619 | + if (reason & REASON_BOUNDARY) { |
---|
| 1620 | + sig = SIGBUS; |
---|
| 1621 | + code = BUS_ADRALN; |
---|
| 1622 | + goto bad; |
---|
| 1623 | + } |
---|
1519 | 1624 | |
---|
1520 | 1625 | if (tm_abort_check(regs, TM_CAUSE_ALIGNMENT | TM_CAUSE_PERSISTENT)) |
---|
1521 | 1626 | goto bail; |
---|
.. | .. |
---|
1525 | 1630 | fixed = fix_alignment(regs); |
---|
1526 | 1631 | |
---|
1527 | 1632 | if (fixed == 1) { |
---|
1528 | | - regs->nip += 4; /* skip over emulated instruction */ |
---|
| 1633 | + /* skip over emulated instruction */ |
---|
| 1634 | + regs->nip += inst_length(reason); |
---|
1529 | 1635 | emulate_single_step(regs); |
---|
1530 | 1636 | goto bail; |
---|
1531 | 1637 | } |
---|
.. | .. |
---|
1538 | 1644 | sig = SIGBUS; |
---|
1539 | 1645 | code = BUS_ADRALN; |
---|
1540 | 1646 | } |
---|
| 1647 | +bad: |
---|
1541 | 1648 | if (user_mode(regs)) |
---|
1542 | 1649 | _exception(sig, regs, code, regs->dar); |
---|
1543 | 1650 | else |
---|
.. | .. |
---|
1556 | 1663 | panic("kernel stack overflow"); |
---|
1557 | 1664 | } |
---|
1558 | 1665 | |
---|
1559 | | -void nonrecoverable_exception(struct pt_regs *regs) |
---|
| 1666 | +void stack_overflow_exception(struct pt_regs *regs) |
---|
1560 | 1667 | { |
---|
1561 | | - printk(KERN_ERR "Non-recoverable exception at PC=%lx MSR=%lx\n", |
---|
1562 | | - regs->nip, regs->msr); |
---|
1563 | | - debugger(regs); |
---|
1564 | | - die("nonrecoverable exception", regs, SIGKILL); |
---|
| 1668 | + enum ctx_state prev_state = exception_enter(); |
---|
| 1669 | + |
---|
| 1670 | + die("Kernel stack overflow", regs, SIGSEGV); |
---|
| 1671 | + |
---|
| 1672 | + exception_exit(prev_state); |
---|
1565 | 1673 | } |
---|
1566 | 1674 | |
---|
1567 | 1675 | void kernel_fp_unavailable_exception(struct pt_regs *regs) |
---|
.. | .. |
---|
1638 | 1746 | [FSCR_TAR_LG] = "TAR", |
---|
1639 | 1747 | [FSCR_MSGP_LG] = "MSGP", |
---|
1640 | 1748 | [FSCR_SCV_LG] = "SCV", |
---|
| 1749 | + [FSCR_PREFIX_LG] = "PREFIX", |
---|
1641 | 1750 | }; |
---|
1642 | 1751 | char *facility = "unknown"; |
---|
1643 | 1752 | u64 value; |
---|
.. | .. |
---|
1759 | 1868 | * checkpointed FP registers need to be loaded. |
---|
1760 | 1869 | */ |
---|
1761 | 1870 | tm_reclaim_current(TM_CAUSE_FAC_UNAV); |
---|
1762 | | - /* Reclaim didn't save out any FPRs to transact_fprs. */ |
---|
| 1871 | + |
---|
| 1872 | + /* |
---|
| 1873 | + * Reclaim initially saved out bogus (lazy) FPRs to ckfp_state, and |
---|
| 1874 | + * then it was overwritten by the thr->fp_state by tm_reclaim_thread(). |
---|
| 1875 | + * |
---|
| 1876 | + * At this point, ck{fp,vr}_state contains the exact values we want to |
---|
| 1877 | + * recheckpoint. |
---|
| 1878 | + */ |
---|
1763 | 1879 | |
---|
1764 | 1880 | /* Enable FP for the task: */ |
---|
1765 | 1881 | current->thread.load_fp = 1; |
---|
1766 | 1882 | |
---|
1767 | | - /* This loads and recheckpoints the FP registers from |
---|
1768 | | - * thread.fpr[]. They will remain in registers after the |
---|
1769 | | - * checkpoint so we don't need to reload them after. |
---|
1770 | | - * If VMX is in use, the VRs now hold checkpointed values, |
---|
1771 | | - * so we don't want to load the VRs from the thread_struct. |
---|
| 1883 | + /* |
---|
| 1884 | + * Recheckpoint all the checkpointed ckpt, ck{fp, vr}_state registers. |
---|
1772 | 1885 | */ |
---|
1773 | 1886 | tm_recheckpoint(¤t->thread); |
---|
1774 | 1887 | } |
---|
.. | .. |
---|
1813 | 1926 | } |
---|
1814 | 1927 | #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ |
---|
1815 | 1928 | |
---|
1816 | | -void performance_monitor_exception(struct pt_regs *regs) |
---|
| 1929 | +static void performance_monitor_exception_nmi(struct pt_regs *regs) |
---|
1817 | 1930 | { |
---|
| 1931 | + nmi_enter(); |
---|
| 1932 | + |
---|
1818 | 1933 | __this_cpu_inc(irq_stat.pmu_irqs); |
---|
1819 | 1934 | |
---|
1820 | 1935 | perf_irq(regs); |
---|
| 1936 | + |
---|
| 1937 | + nmi_exit(); |
---|
| 1938 | +} |
---|
| 1939 | + |
---|
| 1940 | +static void performance_monitor_exception_async(struct pt_regs *regs) |
---|
| 1941 | +{ |
---|
| 1942 | + irq_enter(); |
---|
| 1943 | + |
---|
| 1944 | + __this_cpu_inc(irq_stat.pmu_irqs); |
---|
| 1945 | + |
---|
| 1946 | + perf_irq(regs); |
---|
| 1947 | + |
---|
| 1948 | + irq_exit(); |
---|
| 1949 | +} |
---|
| 1950 | + |
---|
| 1951 | +void performance_monitor_exception(struct pt_regs *regs) |
---|
| 1952 | +{ |
---|
| 1953 | + /* |
---|
| 1954 | + * On 64-bit, if perf interrupts hit in a local_irq_disable |
---|
| 1955 | + * (soft-masked) region, we consider them as NMIs. This is required to |
---|
| 1956 | + * prevent hash faults on user addresses when reading callchains (and |
---|
| 1957 | + * looks better from an irq tracing perspective). |
---|
| 1958 | + */ |
---|
| 1959 | + if (IS_ENABLED(CONFIG_PPC64) && unlikely(arch_irq_disabled_regs(regs))) |
---|
| 1960 | + performance_monitor_exception_nmi(regs); |
---|
| 1961 | + else |
---|
| 1962 | + performance_monitor_exception_async(regs); |
---|
1821 | 1963 | } |
---|
1822 | 1964 | |
---|
1823 | 1965 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS |
---|
.. | .. |
---|
1950 | 2092 | NOKPROBE_SYMBOL(DebugException); |
---|
1951 | 2093 | #endif /* CONFIG_PPC_ADV_DEBUG_REGS */ |
---|
1952 | 2094 | |
---|
1953 | | -#if !defined(CONFIG_TAU_INT) |
---|
1954 | | -void TAUException(struct pt_regs *regs) |
---|
1955 | | -{ |
---|
1956 | | - printk("TAU trap at PC: %lx, MSR: %lx, vector=%lx %s\n", |
---|
1957 | | - regs->nip, regs->msr, regs->trap, print_tainted()); |
---|
1958 | | -} |
---|
1959 | | -#endif /* CONFIG_INT_TAU */ |
---|
1960 | | - |
---|
1961 | 2095 | #ifdef CONFIG_ALTIVEC |
---|
1962 | 2096 | void altivec_assist_exception(struct pt_regs *regs) |
---|
1963 | 2097 | { |
---|
.. | .. |
---|
2015 | 2149 | int code = FPE_FLTUNK; |
---|
2016 | 2150 | int err; |
---|
2017 | 2151 | |
---|
| 2152 | + /* We restore the interrupt state now */ |
---|
| 2153 | + if (!arch_irq_disabled_regs(regs)) |
---|
| 2154 | + local_irq_enable(); |
---|
| 2155 | + |
---|
2018 | 2156 | flush_spe_to_thread(current); |
---|
2019 | 2157 | |
---|
2020 | 2158 | spefscr = current->thread.spefscr; |
---|
.. | .. |
---|
2060 | 2198 | extern int speround_handler(struct pt_regs *regs); |
---|
2061 | 2199 | int err; |
---|
2062 | 2200 | |
---|
| 2201 | + /* We restore the interrupt state now */ |
---|
| 2202 | + if (!arch_irq_disabled_regs(regs)) |
---|
| 2203 | + local_irq_enable(); |
---|
| 2204 | + |
---|
2063 | 2205 | preempt_disable(); |
---|
2064 | 2206 | if (regs->msr & MSR_SPE) |
---|
2065 | 2207 | giveup_spe(current); |
---|
.. | .. |
---|
2095 | 2237 | */ |
---|
2096 | 2238 | void unrecoverable_exception(struct pt_regs *regs) |
---|
2097 | 2239 | { |
---|
2098 | | - printk(KERN_EMERG "Unrecoverable exception %lx at %lx\n", |
---|
2099 | | - regs->trap, regs->nip); |
---|
| 2240 | + pr_emerg("Unrecoverable exception %lx at %lx (msr=%lx)\n", |
---|
| 2241 | + regs->trap, regs->nip, regs->msr); |
---|
2100 | 2242 | die("Unrecoverable exception", regs, SIGABRT); |
---|
2101 | 2243 | } |
---|
2102 | 2244 | NOKPROBE_SYMBOL(unrecoverable_exception); |
---|
.. | .. |
---|
2184 | 2326 | |
---|
2185 | 2327 | static int __init ppc_warn_emulated_init(void) |
---|
2186 | 2328 | { |
---|
2187 | | - struct dentry *dir, *d; |
---|
| 2329 | + struct dentry *dir; |
---|
2188 | 2330 | unsigned int i; |
---|
2189 | 2331 | struct ppc_emulated_entry *entries = (void *)&ppc_emulated; |
---|
2190 | 2332 | |
---|
2191 | | - if (!powerpc_debugfs_root) |
---|
2192 | | - return -ENODEV; |
---|
2193 | | - |
---|
2194 | 2333 | dir = debugfs_create_dir("emulated_instructions", |
---|
2195 | 2334 | powerpc_debugfs_root); |
---|
2196 | | - if (!dir) |
---|
2197 | | - return -ENOMEM; |
---|
2198 | 2335 | |
---|
2199 | | - d = debugfs_create_u32("do_warn", 0644, dir, |
---|
2200 | | - &ppc_warn_emulated); |
---|
2201 | | - if (!d) |
---|
2202 | | - goto fail; |
---|
| 2336 | + debugfs_create_u32("do_warn", 0644, dir, &ppc_warn_emulated); |
---|
2203 | 2337 | |
---|
2204 | | - for (i = 0; i < sizeof(ppc_emulated)/sizeof(*entries); i++) { |
---|
2205 | | - d = debugfs_create_u32(entries[i].name, 0644, dir, |
---|
2206 | | - (u32 *)&entries[i].val.counter); |
---|
2207 | | - if (!d) |
---|
2208 | | - goto fail; |
---|
2209 | | - } |
---|
| 2338 | + for (i = 0; i < sizeof(ppc_emulated)/sizeof(*entries); i++) |
---|
| 2339 | + debugfs_create_u32(entries[i].name, 0644, dir, |
---|
| 2340 | + (u32 *)&entries[i].val.counter); |
---|
2210 | 2341 | |
---|
2211 | 2342 | return 0; |
---|
2212 | | - |
---|
2213 | | -fail: |
---|
2214 | | - debugfs_remove_recursive(dir); |
---|
2215 | | - return -ENOMEM; |
---|
2216 | 2343 | } |
---|
2217 | 2344 | |
---|
2218 | 2345 | device_initcall(ppc_warn_emulated_init); |
---|