.. | .. |
---|
| 1 | +// SPDX-License-Identifier: GPL-2.0-or-later |
---|
1 | 2 | /* |
---|
2 | 3 | * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) |
---|
3 | 4 | * Copyright 2007-2010 Freescale Semiconductor, Inc. |
---|
4 | | - * |
---|
5 | | - * This program is free software; you can redistribute it and/or |
---|
6 | | - * modify it under the terms of the GNU General Public License |
---|
7 | | - * as published by the Free Software Foundation; either version |
---|
8 | | - * 2 of the License, or (at your option) any later version. |
---|
9 | 5 | * |
---|
10 | 6 | * Modified by Cort Dougan (cort@cs.nmt.edu) |
---|
11 | 7 | * and Paul Mackerras (paulus@samba.org) |
---|
.. | .. |
---|
43 | 39 | #include <linux/kmsg_dump.h> |
---|
44 | 40 | |
---|
45 | 41 | #include <asm/emulated_ops.h> |
---|
46 | | -#include <asm/pgtable.h> |
---|
47 | 42 | #include <linux/uaccess.h> |
---|
48 | 43 | #include <asm/debugfs.h> |
---|
49 | 44 | #include <asm/io.h> |
---|
.. | .. |
---|
71 | 66 | #include <sysdev/fsl_pci.h> |
---|
72 | 67 | #include <asm/kprobes.h> |
---|
73 | 68 | #include <asm/stacktrace.h> |
---|
| 69 | +#include <asm/nmi.h> |
---|
74 | 70 | |
---|
75 | 71 | #if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC_CORE) |
---|
76 | 72 | int (*__debugger)(struct pt_regs *regs) __read_mostly; |
---|
.. | .. |
---|
178 | 174 | kmsg_dump(KMSG_DUMP_PANIC); |
---|
179 | 175 | bust_spinlocks(0); |
---|
180 | 176 | debug_locks_off(); |
---|
181 | | - console_flush_on_panic(); |
---|
| 177 | + console_flush_on_panic(CONSOLE_FLUSH_PENDING); |
---|
182 | 178 | } |
---|
183 | 179 | |
---|
184 | 180 | static unsigned long oops_begin(struct pt_regs *regs) |
---|
.. | .. |
---|
247 | 243 | mdelay(MSEC_PER_SEC); |
---|
248 | 244 | } |
---|
249 | 245 | |
---|
250 | | - if (in_interrupt()) |
---|
251 | | - panic("Fatal exception in interrupt"); |
---|
252 | 246 | if (panic_on_oops) |
---|
253 | 247 | panic("Fatal exception"); |
---|
254 | | - do_exit(signr); |
---|
| 248 | + make_task_dead(signr); |
---|
255 | 249 | } |
---|
256 | 250 | NOKPROBE_SYMBOL(oops_end); |
---|
| 251 | + |
---|
| 252 | +static char *get_mmu_str(void) |
---|
| 253 | +{ |
---|
| 254 | + if (early_radix_enabled()) |
---|
| 255 | + return " MMU=Radix"; |
---|
| 256 | + if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE)) |
---|
| 257 | + return " MMU=Hash"; |
---|
| 258 | + return ""; |
---|
| 259 | +} |
---|
257 | 260 | |
---|
258 | 261 | static int __die(const char *str, struct pt_regs *regs, long err) |
---|
259 | 262 | { |
---|
260 | 263 | printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter); |
---|
261 | 264 | |
---|
262 | | - if (IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN)) |
---|
263 | | - printk("LE "); |
---|
264 | | - else |
---|
265 | | - printk("BE "); |
---|
266 | | - |
---|
267 | | - if (IS_ENABLED(CONFIG_PREEMPT)) |
---|
268 | | - pr_cont("PREEMPT "); |
---|
269 | | - |
---|
270 | | - if (IS_ENABLED(CONFIG_SMP)) |
---|
271 | | - pr_cont("SMP NR_CPUS=%d ", NR_CPUS); |
---|
272 | | - |
---|
273 | | - if (debug_pagealloc_enabled()) |
---|
274 | | - pr_cont("DEBUG_PAGEALLOC "); |
---|
275 | | - |
---|
276 | | - if (IS_ENABLED(CONFIG_NUMA)) |
---|
277 | | - pr_cont("NUMA "); |
---|
278 | | - |
---|
279 | | - pr_cont("%s\n", ppc_md.name ? ppc_md.name : ""); |
---|
| 265 | + printk("%s PAGE_SIZE=%luK%s%s%s%s%s%s %s\n", |
---|
| 266 | + IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN) ? "LE" : "BE", |
---|
| 267 | + PAGE_SIZE / 1024, get_mmu_str(), |
---|
| 268 | + IS_ENABLED(CONFIG_PREEMPT) ? " PREEMPT" : "", |
---|
| 269 | + IS_ENABLED(CONFIG_SMP) ? " SMP" : "", |
---|
| 270 | + IS_ENABLED(CONFIG_SMP) ? (" NR_CPUS=" __stringify(NR_CPUS)) : "", |
---|
| 271 | + debug_pagealloc_enabled() ? " DEBUG_PAGEALLOC" : "", |
---|
| 272 | + IS_ENABLED(CONFIG_NUMA) ? " NUMA" : "", |
---|
| 273 | + ppc_md.name ? ppc_md.name : ""); |
---|
280 | 274 | |
---|
281 | 275 | if (notify_die(DIE_OOPS, str, regs, err, 255, SIGSEGV) == NOTIFY_STOP) |
---|
282 | 276 | return 1; |
---|
.. | .. |
---|
307 | 301 | } |
---|
308 | 302 | NOKPROBE_SYMBOL(die); |
---|
309 | 303 | |
---|
310 | | -void user_single_step_siginfo(struct task_struct *tsk, |
---|
311 | | - struct pt_regs *regs, siginfo_t *info) |
---|
| 304 | +void user_single_step_report(struct pt_regs *regs) |
---|
312 | 305 | { |
---|
313 | | - info->si_signo = SIGTRAP; |
---|
314 | | - info->si_code = TRAP_TRACE; |
---|
315 | | - info->si_addr = (void __user *)regs->nip; |
---|
| 306 | + force_sig_fault(SIGTRAP, TRAP_TRACE, (void __user *)regs->nip); |
---|
316 | 307 | } |
---|
317 | 308 | |
---|
318 | 309 | static void show_signal_msg(int signr, struct pt_regs *regs, int code, |
---|
.. | .. |
---|
341 | 332 | show_user_instructions(regs); |
---|
342 | 333 | } |
---|
343 | 334 | |
---|
344 | | -void _exception_pkey(int signr, struct pt_regs *regs, int code, |
---|
345 | | - unsigned long addr, int key) |
---|
| 335 | +static bool exception_common(int signr, struct pt_regs *regs, int code, |
---|
| 336 | + unsigned long addr) |
---|
346 | 337 | { |
---|
347 | | - siginfo_t info; |
---|
348 | | - |
---|
349 | 338 | if (!user_mode(regs)) { |
---|
350 | 339 | die("Exception in kernel mode", regs, signr); |
---|
351 | | - return; |
---|
| 340 | + return false; |
---|
352 | 341 | } |
---|
353 | 342 | |
---|
354 | 343 | show_signal_msg(signr, regs, code, addr); |
---|
.. | .. |
---|
364 | 353 | */ |
---|
365 | 354 | thread_pkey_regs_save(¤t->thread); |
---|
366 | 355 | |
---|
367 | | - clear_siginfo(&info); |
---|
368 | | - info.si_signo = signr; |
---|
369 | | - info.si_code = code; |
---|
370 | | - info.si_addr = (void __user *) addr; |
---|
371 | | - info.si_pkey = key; |
---|
| 356 | + return true; |
---|
| 357 | +} |
---|
372 | 358 | |
---|
373 | | - force_sig_info(signr, &info, current); |
---|
| 359 | +void _exception_pkey(struct pt_regs *regs, unsigned long addr, int key) |
---|
| 360 | +{ |
---|
| 361 | + if (!exception_common(SIGSEGV, regs, SEGV_PKUERR, addr)) |
---|
| 362 | + return; |
---|
| 363 | + |
---|
| 364 | + force_sig_pkuerr((void __user *) addr, key); |
---|
374 | 365 | } |
---|
375 | 366 | |
---|
376 | 367 | void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr) |
---|
377 | 368 | { |
---|
378 | | - _exception_pkey(signr, regs, code, addr, 0); |
---|
| 369 | + if (!exception_common(signr, regs, code, addr)) |
---|
| 370 | + return; |
---|
| 371 | + |
---|
| 372 | + force_sig_fault(signr, code, (void __user *)addr); |
---|
| 373 | +} |
---|
| 374 | + |
---|
| 375 | +/* |
---|
| 376 | + * The interrupt architecture has a quirk in that the HV interrupts excluding |
---|
| 377 | + * the NMIs (0x100 and 0x200) do not clear MSR[RI] at entry. The first thing |
---|
| 378 | + * that an interrupt handler must do is save off a GPR into a scratch register, |
---|
| 379 | + * and all interrupts on POWERNV (HV=1) use the HSPRG1 register as scratch. |
---|
| 380 | + * Therefore an NMI can clobber an HV interrupt's live HSPRG1 without noticing |
---|
| 381 | + * that it is non-reentrant, which leads to random data corruption. |
---|
| 382 | + * |
---|
| 383 | + * The solution is for NMI interrupts in HV mode to check if they originated |
---|
| 384 | + * from these critical HV interrupt regions. If so, then mark them not |
---|
| 385 | + * recoverable. |
---|
| 386 | + * |
---|
| 387 | + * An alternative would be for HV NMIs to use SPRG for scratch to avoid the |
---|
| 388 | + * HSPRG1 clobber, however this would cause guest SPRG to be clobbered. Linux |
---|
| 389 | + * guests should always have MSR[RI]=0 when their scratch SPRG is in use, so |
---|
| 390 | + * that would work. However any other guest OS that may have the SPRG live |
---|
| 391 | + * and MSR[RI]=1 could encounter silent corruption. |
---|
| 392 | + * |
---|
| 393 | + * Builds that do not support KVM could take this second option to increase |
---|
| 394 | + * the recoverability of NMIs. |
---|
| 395 | + */ |
---|
| 396 | +void hv_nmi_check_nonrecoverable(struct pt_regs *regs) |
---|
| 397 | +{ |
---|
| 398 | +#ifdef CONFIG_PPC_POWERNV |
---|
| 399 | + unsigned long kbase = (unsigned long)_stext; |
---|
| 400 | + unsigned long nip = regs->nip; |
---|
| 401 | + |
---|
| 402 | + if (!(regs->msr & MSR_RI)) |
---|
| 403 | + return; |
---|
| 404 | + if (!(regs->msr & MSR_HV)) |
---|
| 405 | + return; |
---|
| 406 | + if (regs->msr & MSR_PR) |
---|
| 407 | + return; |
---|
| 408 | + |
---|
| 409 | + /* |
---|
| 410 | + * Now test if the interrupt has hit a range that may be using |
---|
| 411 | + * HSPRG1 without having RI=0 (i.e., an HSRR interrupt). The |
---|
| 412 | + * problem ranges all run un-relocated. Test real and virt modes |
---|
| 413 | + * at the same time by dropping the high bit of the nip (virt mode |
---|
| 414 | + * entry points still have the +0x4000 offset). |
---|
| 415 | + */ |
---|
| 416 | + nip &= ~0xc000000000000000ULL; |
---|
| 417 | + if ((nip >= 0x500 && nip < 0x600) || (nip >= 0x4500 && nip < 0x4600)) |
---|
| 418 | + goto nonrecoverable; |
---|
| 419 | + if ((nip >= 0x980 && nip < 0xa00) || (nip >= 0x4980 && nip < 0x4a00)) |
---|
| 420 | + goto nonrecoverable; |
---|
| 421 | + if ((nip >= 0xe00 && nip < 0xec0) || (nip >= 0x4e00 && nip < 0x4ec0)) |
---|
| 422 | + goto nonrecoverable; |
---|
| 423 | + if ((nip >= 0xf80 && nip < 0xfa0) || (nip >= 0x4f80 && nip < 0x4fa0)) |
---|
| 424 | + goto nonrecoverable; |
---|
| 425 | + |
---|
| 426 | + /* Trampoline code runs un-relocated so subtract kbase. */ |
---|
| 427 | + if (nip >= (unsigned long)(start_real_trampolines - kbase) && |
---|
| 428 | + nip < (unsigned long)(end_real_trampolines - kbase)) |
---|
| 429 | + goto nonrecoverable; |
---|
| 430 | + if (nip >= (unsigned long)(start_virt_trampolines - kbase) && |
---|
| 431 | + nip < (unsigned long)(end_virt_trampolines - kbase)) |
---|
| 432 | + goto nonrecoverable; |
---|
| 433 | + return; |
---|
| 434 | + |
---|
| 435 | +nonrecoverable: |
---|
| 436 | + regs->msr &= ~MSR_RI; |
---|
| 437 | +#endif |
---|
379 | 438 | } |
---|
380 | 439 | |
---|
381 | 440 | void system_reset_exception(struct pt_regs *regs) |
---|
382 | 441 | { |
---|
| 442 | + unsigned long hsrr0, hsrr1; |
---|
| 443 | + bool saved_hsrrs = false; |
---|
| 444 | + u8 ftrace_enabled = this_cpu_get_ftrace_enabled(); |
---|
| 445 | + |
---|
| 446 | + this_cpu_set_ftrace_enabled(0); |
---|
| 447 | + |
---|
| 448 | + nmi_enter(); |
---|
| 449 | + |
---|
383 | 450 | /* |
---|
384 | | - * Avoid crashes in case of nested NMI exceptions. Recoverability |
---|
385 | | - * is determined by RI and in_nmi |
---|
| 451 | + * System reset can interrupt code where HSRRs are live and MSR[RI]=1. |
---|
| 452 | + * The system reset interrupt itself may clobber HSRRs (e.g., to call |
---|
| 453 | + * OPAL), so save them here and restore them before returning. |
---|
| 454 | + * |
---|
| 455 | + * Machine checks don't need to save HSRRs, as the real mode handler |
---|
| 456 | + * is careful to avoid them, and the regular handler is not delivered |
---|
| 457 | + * as an NMI. |
---|
386 | 458 | */ |
---|
387 | | - bool nested = in_nmi(); |
---|
388 | | - if (!nested) |
---|
389 | | - nmi_enter(); |
---|
| 459 | + if (cpu_has_feature(CPU_FTR_HVMODE)) { |
---|
| 460 | + hsrr0 = mfspr(SPRN_HSRR0); |
---|
| 461 | + hsrr1 = mfspr(SPRN_HSRR1); |
---|
| 462 | + saved_hsrrs = true; |
---|
| 463 | + } |
---|
| 464 | + |
---|
| 465 | + hv_nmi_check_nonrecoverable(regs); |
---|
390 | 466 | |
---|
391 | 467 | __this_cpu_inc(irq_stat.sreset_irqs); |
---|
392 | 468 | |
---|
.. | .. |
---|
439 | 515 | die("Unrecoverable System Reset", regs, SIGABRT); |
---|
440 | 516 | } |
---|
441 | 517 | |
---|
442 | | - if (!nested) |
---|
443 | | - nmi_exit(); |
---|
| 518 | + if (saved_hsrrs) { |
---|
| 519 | + mtspr(SPRN_HSRR0, hsrr0); |
---|
| 520 | + mtspr(SPRN_HSRR1, hsrr1); |
---|
| 521 | + } |
---|
| 522 | + |
---|
| 523 | + nmi_exit(); |
---|
| 524 | + |
---|
| 525 | + this_cpu_set_ftrace_enabled(ftrace_enabled); |
---|
444 | 526 | |
---|
445 | 527 | /* What should we do here? We could issue a shutdown or hard reset. */ |
---|
446 | 528 | } |
---|
.. | .. |
---|
450 | 532 | * Check if the NIP corresponds to the address of a sync |
---|
451 | 533 | * instruction for which there is an entry in the exception |
---|
452 | 534 | * table. |
---|
453 | | - * Note that the 601 only takes a machine check on TEA |
---|
454 | | - * (transfer error ack) signal assertion, and does not |
---|
455 | | - * set any of the top 16 bits of SRR1. |
---|
456 | 535 | * -- paulus. |
---|
457 | 536 | */ |
---|
458 | 537 | static inline int check_io_access(struct pt_regs *regs) |
---|
.. | .. |
---|
501 | 580 | #define REASON_ILLEGAL (ESR_PIL | ESR_PUO) |
---|
502 | 581 | #define REASON_PRIVILEGED ESR_PPR |
---|
503 | 582 | #define REASON_TRAP ESR_PTR |
---|
| 583 | +#define REASON_PREFIXED 0 |
---|
| 584 | +#define REASON_BOUNDARY 0 |
---|
504 | 585 | |
---|
505 | 586 | /* single-step stuff */ |
---|
506 | 587 | #define single_stepping(regs) (current->thread.debug.dbcr0 & DBCR0_IC) |
---|
.. | .. |
---|
515 | 596 | #define REASON_ILLEGAL SRR1_PROGILL |
---|
516 | 597 | #define REASON_PRIVILEGED SRR1_PROGPRIV |
---|
517 | 598 | #define REASON_TRAP SRR1_PROGTRAP |
---|
| 599 | +#define REASON_PREFIXED SRR1_PREFIXED |
---|
| 600 | +#define REASON_BOUNDARY SRR1_BOUNDARY |
---|
518 | 601 | |
---|
519 | 602 | #define single_stepping(regs) ((regs)->msr & MSR_SE) |
---|
520 | 603 | #define clear_single_step(regs) ((regs)->msr &= ~MSR_SE) |
---|
521 | 604 | #define clear_br_trace(regs) ((regs)->msr &= ~MSR_BE) |
---|
522 | 605 | #endif |
---|
| 606 | + |
---|
| 607 | +#define inst_length(reason) (((reason) & REASON_PREFIXED) ? 8 : 4) |
---|
523 | 608 | |
---|
524 | 609 | #if defined(CONFIG_E500) |
---|
525 | 610 | int machine_check_e500mc(struct pt_regs *regs) |
---|
.. | .. |
---|
539 | 624 | printk("Caused by (from MCSR=%lx): ", reason); |
---|
540 | 625 | |
---|
541 | 626 | if (reason & MCSR_MCP) |
---|
542 | | - printk("Machine Check Signal\n"); |
---|
| 627 | + pr_cont("Machine Check Signal\n"); |
---|
543 | 628 | |
---|
544 | 629 | if (reason & MCSR_ICPERR) { |
---|
545 | | - printk("Instruction Cache Parity Error\n"); |
---|
| 630 | + pr_cont("Instruction Cache Parity Error\n"); |
---|
546 | 631 | |
---|
547 | 632 | /* |
---|
548 | 633 | * This is recoverable by invalidating the i-cache. |
---|
.. | .. |
---|
560 | 645 | } |
---|
561 | 646 | |
---|
562 | 647 | if (reason & MCSR_DCPERR_MC) { |
---|
563 | | - printk("Data Cache Parity Error\n"); |
---|
| 648 | + pr_cont("Data Cache Parity Error\n"); |
---|
564 | 649 | |
---|
565 | 650 | /* |
---|
566 | 651 | * In write shadow mode we auto-recover from the error, but it |
---|
.. | .. |
---|
579 | 664 | } |
---|
580 | 665 | |
---|
581 | 666 | if (reason & MCSR_L2MMU_MHIT) { |
---|
582 | | - printk("Hit on multiple TLB entries\n"); |
---|
| 667 | + pr_cont("Hit on multiple TLB entries\n"); |
---|
583 | 668 | recoverable = 0; |
---|
584 | 669 | } |
---|
585 | 670 | |
---|
586 | 671 | if (reason & MCSR_NMI) |
---|
587 | | - printk("Non-maskable interrupt\n"); |
---|
| 672 | + pr_cont("Non-maskable interrupt\n"); |
---|
588 | 673 | |
---|
589 | 674 | if (reason & MCSR_IF) { |
---|
590 | | - printk("Instruction Fetch Error Report\n"); |
---|
| 675 | + pr_cont("Instruction Fetch Error Report\n"); |
---|
591 | 676 | recoverable = 0; |
---|
592 | 677 | } |
---|
593 | 678 | |
---|
594 | 679 | if (reason & MCSR_LD) { |
---|
595 | | - printk("Load Error Report\n"); |
---|
| 680 | + pr_cont("Load Error Report\n"); |
---|
596 | 681 | recoverable = 0; |
---|
597 | 682 | } |
---|
598 | 683 | |
---|
599 | 684 | if (reason & MCSR_ST) { |
---|
600 | | - printk("Store Error Report\n"); |
---|
| 685 | + pr_cont("Store Error Report\n"); |
---|
601 | 686 | recoverable = 0; |
---|
602 | 687 | } |
---|
603 | 688 | |
---|
604 | 689 | if (reason & MCSR_LDG) { |
---|
605 | | - printk("Guarded Load Error Report\n"); |
---|
| 690 | + pr_cont("Guarded Load Error Report\n"); |
---|
606 | 691 | recoverable = 0; |
---|
607 | 692 | } |
---|
608 | 693 | |
---|
609 | 694 | if (reason & MCSR_TLBSYNC) |
---|
610 | | - printk("Simultaneous tlbsync operations\n"); |
---|
| 695 | + pr_cont("Simultaneous tlbsync operations\n"); |
---|
611 | 696 | |
---|
612 | 697 | if (reason & MCSR_BSL2_ERR) { |
---|
613 | | - printk("Level 2 Cache Error\n"); |
---|
| 698 | + pr_cont("Level 2 Cache Error\n"); |
---|
614 | 699 | recoverable = 0; |
---|
615 | 700 | } |
---|
616 | 701 | |
---|
.. | .. |
---|
620 | 705 | addr = mfspr(SPRN_MCAR); |
---|
621 | 706 | addr |= (u64)mfspr(SPRN_MCARU) << 32; |
---|
622 | 707 | |
---|
623 | | - printk("Machine Check %s Address: %#llx\n", |
---|
| 708 | + pr_cont("Machine Check %s Address: %#llx\n", |
---|
624 | 709 | reason & MCSR_MEA ? "Effective" : "Physical", addr); |
---|
625 | 710 | } |
---|
626 | 711 | |
---|
.. | .. |
---|
644 | 729 | printk("Caused by (from MCSR=%lx): ", reason); |
---|
645 | 730 | |
---|
646 | 731 | if (reason & MCSR_MCP) |
---|
647 | | - printk("Machine Check Signal\n"); |
---|
| 732 | + pr_cont("Machine Check Signal\n"); |
---|
648 | 733 | if (reason & MCSR_ICPERR) |
---|
649 | | - printk("Instruction Cache Parity Error\n"); |
---|
| 734 | + pr_cont("Instruction Cache Parity Error\n"); |
---|
650 | 735 | if (reason & MCSR_DCP_PERR) |
---|
651 | | - printk("Data Cache Push Parity Error\n"); |
---|
| 736 | + pr_cont("Data Cache Push Parity Error\n"); |
---|
652 | 737 | if (reason & MCSR_DCPERR) |
---|
653 | | - printk("Data Cache Parity Error\n"); |
---|
| 738 | + pr_cont("Data Cache Parity Error\n"); |
---|
654 | 739 | if (reason & MCSR_BUS_IAERR) |
---|
655 | | - printk("Bus - Instruction Address Error\n"); |
---|
| 740 | + pr_cont("Bus - Instruction Address Error\n"); |
---|
656 | 741 | if (reason & MCSR_BUS_RAERR) |
---|
657 | | - printk("Bus - Read Address Error\n"); |
---|
| 742 | + pr_cont("Bus - Read Address Error\n"); |
---|
658 | 743 | if (reason & MCSR_BUS_WAERR) |
---|
659 | | - printk("Bus - Write Address Error\n"); |
---|
| 744 | + pr_cont("Bus - Write Address Error\n"); |
---|
660 | 745 | if (reason & MCSR_BUS_IBERR) |
---|
661 | | - printk("Bus - Instruction Data Error\n"); |
---|
| 746 | + pr_cont("Bus - Instruction Data Error\n"); |
---|
662 | 747 | if (reason & MCSR_BUS_RBERR) |
---|
663 | | - printk("Bus - Read Data Bus Error\n"); |
---|
| 748 | + pr_cont("Bus - Read Data Bus Error\n"); |
---|
664 | 749 | if (reason & MCSR_BUS_WBERR) |
---|
665 | | - printk("Bus - Write Data Bus Error\n"); |
---|
| 750 | + pr_cont("Bus - Write Data Bus Error\n"); |
---|
666 | 751 | if (reason & MCSR_BUS_IPERR) |
---|
667 | | - printk("Bus - Instruction Parity Error\n"); |
---|
| 752 | + pr_cont("Bus - Instruction Parity Error\n"); |
---|
668 | 753 | if (reason & MCSR_BUS_RPERR) |
---|
669 | | - printk("Bus - Read Parity Error\n"); |
---|
| 754 | + pr_cont("Bus - Read Parity Error\n"); |
---|
670 | 755 | |
---|
671 | 756 | return 0; |
---|
672 | 757 | } |
---|
.. | .. |
---|
684 | 769 | printk("Caused by (from MCSR=%lx): ", reason); |
---|
685 | 770 | |
---|
686 | 771 | if (reason & MCSR_MCP) |
---|
687 | | - printk("Machine Check Signal\n"); |
---|
| 772 | + pr_cont("Machine Check Signal\n"); |
---|
688 | 773 | if (reason & MCSR_CP_PERR) |
---|
689 | | - printk("Cache Push Parity Error\n"); |
---|
| 774 | + pr_cont("Cache Push Parity Error\n"); |
---|
690 | 775 | if (reason & MCSR_CPERR) |
---|
691 | | - printk("Cache Parity Error\n"); |
---|
| 776 | + pr_cont("Cache Parity Error\n"); |
---|
692 | 777 | if (reason & MCSR_EXCP_ERR) |
---|
693 | | - printk("ISI, ITLB, or Bus Error on first instruction fetch for an exception handler\n"); |
---|
| 778 | + pr_cont("ISI, ITLB, or Bus Error on first instruction fetch for an exception handler\n"); |
---|
694 | 779 | if (reason & MCSR_BUS_IRERR) |
---|
695 | | - printk("Bus - Read Bus Error on instruction fetch\n"); |
---|
| 780 | + pr_cont("Bus - Read Bus Error on instruction fetch\n"); |
---|
696 | 781 | if (reason & MCSR_BUS_DRERR) |
---|
697 | | - printk("Bus - Read Bus Error on data load\n"); |
---|
| 782 | + pr_cont("Bus - Read Bus Error on data load\n"); |
---|
698 | 783 | if (reason & MCSR_BUS_WRERR) |
---|
699 | | - printk("Bus - Write Bus Error on buffered store or cache line push\n"); |
---|
| 784 | + pr_cont("Bus - Write Bus Error on buffered store or cache line push\n"); |
---|
700 | 785 | |
---|
701 | 786 | return 0; |
---|
702 | 787 | } |
---|
.. | .. |
---|
709 | 794 | printk("Caused by (from SRR1=%lx): ", reason); |
---|
710 | 795 | switch (reason & 0x601F0000) { |
---|
711 | 796 | case 0x80000: |
---|
712 | | - printk("Machine check signal\n"); |
---|
| 797 | + pr_cont("Machine check signal\n"); |
---|
713 | 798 | break; |
---|
714 | | - case 0: /* for 601 */ |
---|
715 | 799 | case 0x40000: |
---|
716 | 800 | case 0x140000: /* 7450 MSS error and TEA */ |
---|
717 | | - printk("Transfer error ack signal\n"); |
---|
| 801 | + pr_cont("Transfer error ack signal\n"); |
---|
718 | 802 | break; |
---|
719 | 803 | case 0x20000: |
---|
720 | | - printk("Data parity error signal\n"); |
---|
| 804 | + pr_cont("Data parity error signal\n"); |
---|
721 | 805 | break; |
---|
722 | 806 | case 0x10000: |
---|
723 | | - printk("Address parity error signal\n"); |
---|
| 807 | + pr_cont("Address parity error signal\n"); |
---|
724 | 808 | break; |
---|
725 | 809 | case 0x20000000: |
---|
726 | | - printk("L1 Data Cache error\n"); |
---|
| 810 | + pr_cont("L1 Data Cache error\n"); |
---|
727 | 811 | break; |
---|
728 | 812 | case 0x40000000: |
---|
729 | | - printk("L1 Instruction Cache error\n"); |
---|
| 813 | + pr_cont("L1 Instruction Cache error\n"); |
---|
730 | 814 | break; |
---|
731 | 815 | case 0x00100000: |
---|
732 | | - printk("L2 data cache parity error\n"); |
---|
| 816 | + pr_cont("L2 data cache parity error\n"); |
---|
733 | 817 | break; |
---|
734 | 818 | default: |
---|
735 | | - printk("Unknown values in msr\n"); |
---|
| 819 | + pr_cont("Unknown values in msr\n"); |
---|
736 | 820 | } |
---|
737 | 821 | return 0; |
---|
738 | 822 | } |
---|
.. | .. |
---|
741 | 825 | void machine_check_exception(struct pt_regs *regs) |
---|
742 | 826 | { |
---|
743 | 827 | int recover = 0; |
---|
744 | | - bool nested = in_nmi(); |
---|
745 | | - if (!nested) |
---|
746 | | - nmi_enter(); |
---|
747 | 828 | |
---|
748 | | - /* 64s accounts the mce in machine_check_early when in HVMODE */ |
---|
749 | | - if (!IS_ENABLED(CONFIG_PPC_BOOK3S_64) || !cpu_has_feature(CPU_FTR_HVMODE)) |
---|
750 | | - __this_cpu_inc(irq_stat.mce_exceptions); |
---|
| 829 | + /* |
---|
| 830 | + * BOOK3S_64 does not call this handler as a non-maskable interrupt |
---|
| 831 | + * (it uses its own early real-mode handler to handle the MCE proper |
---|
| 832 | + * and then raises irq_work to call this handler when interrupts are |
---|
| 833 | + * enabled). |
---|
| 834 | + * |
---|
| 835 | + * This is silly. The BOOK3S_64 should just call a different function |
---|
| 836 | + * rather than expecting semantics to magically change. Something |
---|
| 837 | + * like 'non_nmi_machine_check_exception()', perhaps? |
---|
| 838 | + */ |
---|
| 839 | + const bool nmi = !IS_ENABLED(CONFIG_PPC_BOOK3S_64); |
---|
| 840 | + |
---|
| 841 | + if (nmi) nmi_enter(); |
---|
| 842 | + |
---|
| 843 | + __this_cpu_inc(irq_stat.mce_exceptions); |
---|
751 | 844 | |
---|
752 | 845 | add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE); |
---|
753 | 846 | |
---|
.. | .. |
---|
771 | 864 | if (check_io_access(regs)) |
---|
772 | 865 | goto bail; |
---|
773 | 866 | |
---|
774 | | - if (!nested) |
---|
775 | | - nmi_exit(); |
---|
| 867 | + if (nmi) nmi_exit(); |
---|
776 | 868 | |
---|
777 | 869 | die("Machine check", regs, SIGBUS); |
---|
778 | 870 | |
---|
.. | .. |
---|
783 | 875 | return; |
---|
784 | 876 | |
---|
785 | 877 | bail: |
---|
786 | | - if (!nested) |
---|
787 | | - nmi_exit(); |
---|
| 878 | + if (nmi) nmi_exit(); |
---|
788 | 879 | } |
---|
789 | 880 | |
---|
790 | 881 | void SMIException(struct pt_regs *regs) |
---|
.. | .. |
---|
845 | 936 | addr = (__force const void __user *)ea; |
---|
846 | 937 | |
---|
847 | 938 | /* Check it */ |
---|
848 | | - if (!access_ok(VERIFY_READ, addr, 16)) { |
---|
| 939 | + if (!access_ok(addr, 16)) { |
---|
849 | 940 | pr_devel("HMI vec emu: bad access %i:%s[%d] nip=%016lx" |
---|
850 | 941 | " instr=%08x addr=%016lx\n", |
---|
851 | 942 | smp_processor_id(), current->comm, current->pid, |
---|
.. | .. |
---|
1442 | 1533 | goto bail; |
---|
1443 | 1534 | } else { |
---|
1444 | 1535 | printk(KERN_EMERG "Unexpected TM Bad Thing exception " |
---|
1445 | | - "at %lx (msr 0x%x)\n", regs->nip, reason); |
---|
| 1536 | + "at %lx (msr 0x%lx) tm_scratch=%llx\n", |
---|
| 1537 | + regs->nip, regs->msr, get_paca()->tm_scratch); |
---|
1446 | 1538 | die("Unrecoverable exception", regs, SIGABRT); |
---|
1447 | 1539 | } |
---|
1448 | 1540 | } |
---|
.. | .. |
---|
1512 | 1604 | { |
---|
1513 | 1605 | enum ctx_state prev_state = exception_enter(); |
---|
1514 | 1606 | int sig, code, fixed = 0; |
---|
| 1607 | + unsigned long reason; |
---|
1515 | 1608 | |
---|
1516 | 1609 | /* We restore the interrupt state now */ |
---|
1517 | 1610 | if (!arch_irq_disabled_regs(regs)) |
---|
1518 | 1611 | local_irq_enable(); |
---|
| 1612 | + |
---|
| 1613 | + reason = get_reason(regs); |
---|
| 1614 | + |
---|
| 1615 | + if (reason & REASON_BOUNDARY) { |
---|
| 1616 | + sig = SIGBUS; |
---|
| 1617 | + code = BUS_ADRALN; |
---|
| 1618 | + goto bad; |
---|
| 1619 | + } |
---|
1519 | 1620 | |
---|
1520 | 1621 | if (tm_abort_check(regs, TM_CAUSE_ALIGNMENT | TM_CAUSE_PERSISTENT)) |
---|
1521 | 1622 | goto bail; |
---|
.. | .. |
---|
1525 | 1626 | fixed = fix_alignment(regs); |
---|
1526 | 1627 | |
---|
1527 | 1628 | if (fixed == 1) { |
---|
1528 | | - regs->nip += 4; /* skip over emulated instruction */ |
---|
| 1629 | + /* skip over emulated instruction */ |
---|
| 1630 | + regs->nip += inst_length(reason); |
---|
1529 | 1631 | emulate_single_step(regs); |
---|
1530 | 1632 | goto bail; |
---|
1531 | 1633 | } |
---|
.. | .. |
---|
1538 | 1640 | sig = SIGBUS; |
---|
1539 | 1641 | code = BUS_ADRALN; |
---|
1540 | 1642 | } |
---|
| 1643 | +bad: |
---|
1541 | 1644 | if (user_mode(regs)) |
---|
1542 | 1645 | _exception(sig, regs, code, regs->dar); |
---|
1543 | 1646 | else |
---|
.. | .. |
---|
1556 | 1659 | panic("kernel stack overflow"); |
---|
1557 | 1660 | } |
---|
1558 | 1661 | |
---|
1559 | | -void nonrecoverable_exception(struct pt_regs *regs) |
---|
| 1662 | +void stack_overflow_exception(struct pt_regs *regs) |
---|
1560 | 1663 | { |
---|
1561 | | - printk(KERN_ERR "Non-recoverable exception at PC=%lx MSR=%lx\n", |
---|
1562 | | - regs->nip, regs->msr); |
---|
1563 | | - debugger(regs); |
---|
1564 | | - die("nonrecoverable exception", regs, SIGKILL); |
---|
| 1664 | + enum ctx_state prev_state = exception_enter(); |
---|
| 1665 | + |
---|
| 1666 | + die("Kernel stack overflow", regs, SIGSEGV); |
---|
| 1667 | + |
---|
| 1668 | + exception_exit(prev_state); |
---|
1565 | 1669 | } |
---|
1566 | 1670 | |
---|
1567 | 1671 | void kernel_fp_unavailable_exception(struct pt_regs *regs) |
---|
.. | .. |
---|
1638 | 1742 | [FSCR_TAR_LG] = "TAR", |
---|
1639 | 1743 | [FSCR_MSGP_LG] = "MSGP", |
---|
1640 | 1744 | [FSCR_SCV_LG] = "SCV", |
---|
| 1745 | + [FSCR_PREFIX_LG] = "PREFIX", |
---|
1641 | 1746 | }; |
---|
1642 | 1747 | char *facility = "unknown"; |
---|
1643 | 1748 | u64 value; |
---|
.. | .. |
---|
1759 | 1864 | * checkpointed FP registers need to be loaded. |
---|
1760 | 1865 | */ |
---|
1761 | 1866 | tm_reclaim_current(TM_CAUSE_FAC_UNAV); |
---|
1762 | | - /* Reclaim didn't save out any FPRs to transact_fprs. */ |
---|
| 1867 | + |
---|
| 1868 | + /* |
---|
| 1869 | + * Reclaim initially saved out bogus (lazy) FPRs to ckfp_state, and |
---|
| 870 | + * then it was overwritten by the thr->fp_state by tm_reclaim_thread(). |
---|
| 1871 | + * |
---|
| 1872 | + * At this point, ck{fp,vr}_state contains the exact values we want to |
---|
| 1873 | + * recheckpoint. |
---|
| 1874 | + */ |
---|
1763 | 1875 | |
---|
1764 | 1876 | /* Enable FP for the task: */ |
---|
1765 | 1877 | current->thread.load_fp = 1; |
---|
1766 | 1878 | |
---|
1767 | | - /* This loads and recheckpoints the FP registers from |
---|
1768 | | - * thread.fpr[]. They will remain in registers after the |
---|
1769 | | - * checkpoint so we don't need to reload them after. |
---|
1770 | | - * If VMX is in use, the VRs now hold checkpointed values, |
---|
1771 | | - * so we don't want to load the VRs from the thread_struct. |
---|
| 1879 | + /* |
---|
| 1880 | + * Recheckpoint all the checkpointed ckpt, ck{fp, vr}_state registers. |
---|
1772 | 1881 | */ |
---|
1773 | 1882 | tm_recheckpoint(¤t->thread); |
---|
1774 | 1883 | } |
---|
.. | .. |
---|
1813 | 1922 | } |
---|
1814 | 1923 | #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ |
---|
1815 | 1924 | |
---|
1816 | | -void performance_monitor_exception(struct pt_regs *regs) |
---|
| 1925 | +static void performance_monitor_exception_nmi(struct pt_regs *regs) |
---|
1817 | 1926 | { |
---|
| 1927 | + nmi_enter(); |
---|
| 1928 | + |
---|
1818 | 1929 | __this_cpu_inc(irq_stat.pmu_irqs); |
---|
1819 | 1930 | |
---|
1820 | 1931 | perf_irq(regs); |
---|
| 1932 | + |
---|
| 1933 | + nmi_exit(); |
---|
| 1934 | +} |
---|
| 1935 | + |
---|
| 1936 | +static void performance_monitor_exception_async(struct pt_regs *regs) |
---|
| 1937 | +{ |
---|
| 1938 | + irq_enter(); |
---|
| 1939 | + |
---|
| 1940 | + __this_cpu_inc(irq_stat.pmu_irqs); |
---|
| 1941 | + |
---|
| 1942 | + perf_irq(regs); |
---|
| 1943 | + |
---|
| 1944 | + irq_exit(); |
---|
| 1945 | +} |
---|
| 1946 | + |
---|
| 1947 | +void performance_monitor_exception(struct pt_regs *regs) |
---|
| 1948 | +{ |
---|
| 1949 | + /* |
---|
| 1950 | + * On 64-bit, if perf interrupts hit in a local_irq_disable |
---|
| 1951 | + * (soft-masked) region, we consider them as NMIs. This is required to |
---|
| 1952 | + * prevent hash faults on user addresses when reading callchains (and |
---|
| 1953 | + * looks better from an irq tracing perspective). |
---|
| 1954 | + */ |
---|
| 1955 | + if (IS_ENABLED(CONFIG_PPC64) && unlikely(arch_irq_disabled_regs(regs))) |
---|
| 1956 | + performance_monitor_exception_nmi(regs); |
---|
| 1957 | + else |
---|
| 1958 | + performance_monitor_exception_async(regs); |
---|
1821 | 1959 | } |
---|
1822 | 1960 | |
---|
1823 | 1961 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS |
---|
.. | .. |
---|
1950 | 2088 | NOKPROBE_SYMBOL(DebugException); |
---|
1951 | 2089 | #endif /* CONFIG_PPC_ADV_DEBUG_REGS */ |
---|
1952 | 2090 | |
---|
1953 | | -#if !defined(CONFIG_TAU_INT) |
---|
1954 | | -void TAUException(struct pt_regs *regs) |
---|
1955 | | -{ |
---|
1956 | | - printk("TAU trap at PC: %lx, MSR: %lx, vector=%lx %s\n", |
---|
1957 | | - regs->nip, regs->msr, regs->trap, print_tainted()); |
---|
1958 | | -} |
---|
1959 | | -#endif /* CONFIG_INT_TAU */ |
---|
1960 | | - |
---|
1961 | 2091 | #ifdef CONFIG_ALTIVEC |
---|
1962 | 2092 | void altivec_assist_exception(struct pt_regs *regs) |
---|
1963 | 2093 | { |
---|
.. | .. |
---|
2015 | 2145 | int code = FPE_FLTUNK; |
---|
2016 | 2146 | int err; |
---|
2017 | 2147 | |
---|
| 2148 | + /* We restore the interrupt state now */ |
---|
| 2149 | + if (!arch_irq_disabled_regs(regs)) |
---|
| 2150 | + local_irq_enable(); |
---|
| 2151 | + |
---|
2018 | 2152 | flush_spe_to_thread(current); |
---|
2019 | 2153 | |
---|
2020 | 2154 | spefscr = current->thread.spefscr; |
---|
.. | .. |
---|
2060 | 2194 | extern int speround_handler(struct pt_regs *regs); |
---|
2061 | 2195 | int err; |
---|
2062 | 2196 | |
---|
| 2197 | + /* We restore the interrupt state now */ |
---|
| 2198 | + if (!arch_irq_disabled_regs(regs)) |
---|
| 2199 | + local_irq_enable(); |
---|
| 2200 | + |
---|
2063 | 2201 | preempt_disable(); |
---|
2064 | 2202 | if (regs->msr & MSR_SPE) |
---|
2065 | 2203 | giveup_spe(current); |
---|
.. | .. |
---|
2095 | 2233 | */ |
---|
2096 | 2234 | void unrecoverable_exception(struct pt_regs *regs) |
---|
2097 | 2235 | { |
---|
2098 | | - printk(KERN_EMERG "Unrecoverable exception %lx at %lx\n", |
---|
2099 | | - regs->trap, regs->nip); |
---|
| 2236 | + pr_emerg("Unrecoverable exception %lx at %lx (msr=%lx)\n", |
---|
| 2237 | + regs->trap, regs->nip, regs->msr); |
---|
2100 | 2238 | die("Unrecoverable exception", regs, SIGABRT); |
---|
2101 | 2239 | } |
---|
2102 | 2240 | NOKPROBE_SYMBOL(unrecoverable_exception); |
---|
.. | .. |
---|
2184 | 2322 | |
---|
2185 | 2323 | static int __init ppc_warn_emulated_init(void) |
---|
2186 | 2324 | { |
---|
2187 | | - struct dentry *dir, *d; |
---|
| 2325 | + struct dentry *dir; |
---|
2188 | 2326 | unsigned int i; |
---|
2189 | 2327 | struct ppc_emulated_entry *entries = (void *)&ppc_emulated; |
---|
2190 | 2328 | |
---|
2191 | | - if (!powerpc_debugfs_root) |
---|
2192 | | - return -ENODEV; |
---|
2193 | | - |
---|
2194 | 2329 | dir = debugfs_create_dir("emulated_instructions", |
---|
2195 | 2330 | powerpc_debugfs_root); |
---|
2196 | | - if (!dir) |
---|
2197 | | - return -ENOMEM; |
---|
2198 | 2331 | |
---|
2199 | | - d = debugfs_create_u32("do_warn", 0644, dir, |
---|
2200 | | - &ppc_warn_emulated); |
---|
2201 | | - if (!d) |
---|
2202 | | - goto fail; |
---|
| 2332 | + debugfs_create_u32("do_warn", 0644, dir, &ppc_warn_emulated); |
---|
2203 | 2333 | |
---|
2204 | | - for (i = 0; i < sizeof(ppc_emulated)/sizeof(*entries); i++) { |
---|
2205 | | - d = debugfs_create_u32(entries[i].name, 0644, dir, |
---|
2206 | | - (u32 *)&entries[i].val.counter); |
---|
2207 | | - if (!d) |
---|
2208 | | - goto fail; |
---|
2209 | | - } |
---|
| 2334 | + for (i = 0; i < sizeof(ppc_emulated)/sizeof(*entries); i++) |
---|
| 2335 | + debugfs_create_u32(entries[i].name, 0644, dir, |
---|
| 2336 | + (u32 *)&entries[i].val.counter); |
---|
2210 | 2337 | |
---|
2211 | 2338 | return 0; |
---|
2212 | | - |
---|
2213 | | -fail: |
---|
2214 | | - debugfs_remove_recursive(dir); |
---|
2215 | | - return -ENOMEM; |
---|
2216 | 2339 | } |
---|
2217 | 2340 | |
---|
2218 | 2341 | device_initcall(ppc_warn_emulated_init); |
---|