.. | .. |
---|
| 1 | +// SPDX-License-Identifier: GPL-2.0-or-later |
---|
1 | 2 | /* |
---|
2 | 3 | * Derived from "arch/i386/kernel/process.c" |
---|
3 | 4 | * Copyright (C) 1995 Linus Torvalds |
---|
.. | .. |
---|
7 | 8 | * |
---|
8 | 9 | * PowerPC version |
---|
9 | 10 | * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) |
---|
10 | | - * |
---|
11 | | - * This program is free software; you can redistribute it and/or |
---|
12 | | - * modify it under the terms of the GNU General Public License |
---|
13 | | - * as published by the Free Software Foundation; either version |
---|
14 | | - * 2 of the License, or (at your option) any later version. |
---|
15 | 11 | */ |
---|
16 | 12 | |
---|
17 | 13 | #include <linux/errno.h> |
---|
.. | .. |
---|
43 | 39 | #include <linux/uaccess.h> |
---|
44 | 40 | #include <linux/elf-randomize.h> |
---|
45 | 41 | #include <linux/pkeys.h> |
---|
| 42 | +#include <linux/seq_buf.h> |
---|
46 | 43 | |
---|
47 | | -#include <asm/pgtable.h> |
---|
48 | 44 | #include <asm/io.h> |
---|
49 | 45 | #include <asm/processor.h> |
---|
50 | 46 | #include <asm/mmu.h> |
---|
.. | .. |
---|
65 | 61 | #include <asm/livepatch.h> |
---|
66 | 62 | #include <asm/cpu_has_feature.h> |
---|
67 | 63 | #include <asm/asm-prototypes.h> |
---|
| 64 | +#include <asm/stacktrace.h> |
---|
| 65 | +#include <asm/hw_breakpoint.h> |
---|
68 | 66 | |
---|
69 | 67 | #include <linux/kprobes.h> |
---|
70 | 68 | #include <linux/kdebug.h> |
---|
.. | .. |
---|
118 | 116 | } |
---|
119 | 117 | early_param("ppc_strict_facility_enable", enable_strict_msr_control); |
---|
120 | 118 | |
---|
121 | | -unsigned long msr_check_and_set(unsigned long bits) |
---|
| 119 | +/* notrace because it's called by restore_math */ |
---|
| 120 | +unsigned long notrace msr_check_and_set(unsigned long bits) |
---|
122 | 121 | { |
---|
123 | 122 | unsigned long oldmsr = mfmsr(); |
---|
124 | 123 | unsigned long newmsr; |
---|
125 | 124 | |
---|
126 | 125 | newmsr = oldmsr | bits; |
---|
127 | 126 | |
---|
128 | | -#ifdef CONFIG_VSX |
---|
129 | 127 | if (cpu_has_feature(CPU_FTR_VSX) && (bits & MSR_FP)) |
---|
130 | 128 | newmsr |= MSR_VSX; |
---|
131 | | -#endif |
---|
132 | 129 | |
---|
133 | 130 | if (oldmsr != newmsr) |
---|
134 | 131 | mtmsr_isync(newmsr); |
---|
.. | .. |
---|
137 | 134 | } |
---|
138 | 135 | EXPORT_SYMBOL_GPL(msr_check_and_set); |
---|
139 | 136 | |
---|
140 | | -void __msr_check_and_clear(unsigned long bits) |
---|
| 137 | +/* notrace because it's called by restore_math */ |
---|
| 138 | +void notrace __msr_check_and_clear(unsigned long bits) |
---|
141 | 139 | { |
---|
142 | 140 | unsigned long oldmsr = mfmsr(); |
---|
143 | 141 | unsigned long newmsr; |
---|
144 | 142 | |
---|
145 | 143 | newmsr = oldmsr & ~bits; |
---|
146 | 144 | |
---|
147 | | -#ifdef CONFIG_VSX |
---|
148 | 145 | if (cpu_has_feature(CPU_FTR_VSX) && (bits & MSR_FP)) |
---|
149 | 146 | newmsr &= ~MSR_VSX; |
---|
150 | | -#endif |
---|
151 | 147 | |
---|
152 | 148 | if (oldmsr != newmsr) |
---|
153 | 149 | mtmsr_isync(newmsr); |
---|
.. | .. |
---|
162 | 158 | save_fpu(tsk); |
---|
163 | 159 | msr = tsk->thread.regs->msr; |
---|
164 | 160 | msr &= ~(MSR_FP|MSR_FE0|MSR_FE1); |
---|
165 | | -#ifdef CONFIG_VSX |
---|
166 | 161 | if (cpu_has_feature(CPU_FTR_VSX)) |
---|
167 | 162 | msr &= ~MSR_VSX; |
---|
168 | | -#endif |
---|
169 | 163 | tsk->thread.regs->msr = msr; |
---|
170 | 164 | } |
---|
171 | 165 | |
---|
.. | .. |
---|
235 | 229 | } |
---|
236 | 230 | } |
---|
237 | 231 | EXPORT_SYMBOL(enable_kernel_fp); |
---|
238 | | - |
---|
239 | | -static int restore_fp(struct task_struct *tsk) |
---|
240 | | -{ |
---|
241 | | - if (tsk->thread.load_fp) { |
---|
242 | | - load_fp_state(&current->thread.fp_state); |
---|
243 | | - current->thread.load_fp++; |
---|
244 | | - return 1; |
---|
245 | | - } |
---|
246 | | - return 0; |
---|
247 | | -} |
---|
248 | 232 | #else |
---|
249 | | -static int restore_fp(struct task_struct *tsk) { return 0; } |
---|
| 233 | +static inline void __giveup_fpu(struct task_struct *tsk) { } |
---|
250 | 234 | #endif /* CONFIG_PPC_FPU */ |
---|
251 | 235 | |
---|
252 | 236 | #ifdef CONFIG_ALTIVEC |
---|
253 | | -#define loadvec(thr) ((thr).load_vec) |
---|
254 | | - |
---|
255 | 237 | static void __giveup_altivec(struct task_struct *tsk) |
---|
256 | 238 | { |
---|
257 | 239 | unsigned long msr; |
---|
.. | .. |
---|
259 | 241 | save_altivec(tsk); |
---|
260 | 242 | msr = tsk->thread.regs->msr; |
---|
261 | 243 | msr &= ~MSR_VEC; |
---|
262 | | -#ifdef CONFIG_VSX |
---|
263 | 244 | if (cpu_has_feature(CPU_FTR_VSX)) |
---|
264 | 245 | msr &= ~MSR_VSX; |
---|
265 | | -#endif |
---|
266 | 246 | tsk->thread.regs->msr = msr; |
---|
267 | 247 | } |
---|
268 | 248 | |
---|
.. | .. |
---|
317 | 297 | } |
---|
318 | 298 | } |
---|
319 | 299 | EXPORT_SYMBOL_GPL(flush_altivec_to_thread); |
---|
320 | | - |
---|
321 | | -static int restore_altivec(struct task_struct *tsk) |
---|
322 | | -{ |
---|
323 | | - if (cpu_has_feature(CPU_FTR_ALTIVEC) && (tsk->thread.load_vec)) { |
---|
324 | | - load_vr_state(&tsk->thread.vr_state); |
---|
325 | | - tsk->thread.used_vr = 1; |
---|
326 | | - tsk->thread.load_vec++; |
---|
327 | | - |
---|
328 | | - return 1; |
---|
329 | | - } |
---|
330 | | - return 0; |
---|
331 | | -} |
---|
332 | | -#else |
---|
333 | | -#define loadvec(thr) 0 |
---|
334 | | -static inline int restore_altivec(struct task_struct *tsk) { return 0; } |
---|
335 | 300 | #endif /* CONFIG_ALTIVEC */ |
---|
336 | 301 | |
---|
337 | 302 | #ifdef CONFIG_VSX |
---|
.. | .. |
---|
399 | 364 | } |
---|
400 | 365 | } |
---|
401 | 366 | EXPORT_SYMBOL_GPL(flush_vsx_to_thread); |
---|
402 | | - |
---|
403 | | -static int restore_vsx(struct task_struct *tsk) |
---|
404 | | -{ |
---|
405 | | - if (cpu_has_feature(CPU_FTR_VSX)) { |
---|
406 | | - tsk->thread.used_vsr = 1; |
---|
407 | | - return 1; |
---|
408 | | - } |
---|
409 | | - |
---|
410 | | - return 0; |
---|
411 | | -} |
---|
412 | | -#else |
---|
413 | | -static inline int restore_vsx(struct task_struct *tsk) { return 0; } |
---|
414 | 367 | #endif /* CONFIG_VSX */ |
---|
415 | 368 | |
---|
416 | 369 | #ifdef CONFIG_SPE |
---|
.. | .. |
---|
455 | 408 | |
---|
456 | 409 | static int __init init_msr_all_available(void) |
---|
457 | 410 | { |
---|
458 | | -#ifdef CONFIG_PPC_FPU |
---|
459 | | - msr_all_available |= MSR_FP; |
---|
460 | | -#endif |
---|
461 | | -#ifdef CONFIG_ALTIVEC |
---|
| 411 | + if (IS_ENABLED(CONFIG_PPC_FPU)) |
---|
| 412 | + msr_all_available |= MSR_FP; |
---|
462 | 413 | if (cpu_has_feature(CPU_FTR_ALTIVEC)) |
---|
463 | 414 | msr_all_available |= MSR_VEC; |
---|
464 | | -#endif |
---|
465 | | -#ifdef CONFIG_VSX |
---|
466 | 415 | if (cpu_has_feature(CPU_FTR_VSX)) |
---|
467 | 416 | msr_all_available |= MSR_VSX; |
---|
468 | | -#endif |
---|
469 | | -#ifdef CONFIG_SPE |
---|
470 | 417 | if (cpu_has_feature(CPU_FTR_SPE)) |
---|
471 | 418 | msr_all_available |= MSR_SPE; |
---|
472 | | -#endif |
---|
473 | 419 | |
---|
474 | 420 | return 0; |
---|
475 | 421 | } |
---|
.. | .. |
---|
493 | 439 | |
---|
494 | 440 | WARN_ON((usermsr & MSR_VSX) && !((usermsr & MSR_FP) && (usermsr & MSR_VEC))); |
---|
495 | 441 | |
---|
496 | | -#ifdef CONFIG_PPC_FPU |
---|
497 | 442 | if (usermsr & MSR_FP) |
---|
498 | 443 | __giveup_fpu(tsk); |
---|
499 | | -#endif |
---|
500 | | -#ifdef CONFIG_ALTIVEC |
---|
501 | 444 | if (usermsr & MSR_VEC) |
---|
502 | 445 | __giveup_altivec(tsk); |
---|
503 | | -#endif |
---|
504 | | -#ifdef CONFIG_SPE |
---|
505 | 446 | if (usermsr & MSR_SPE) |
---|
506 | 447 | __giveup_spe(tsk); |
---|
507 | | -#endif |
---|
508 | 448 | |
---|
509 | 449 | msr_check_and_clear(msr_all_available); |
---|
510 | 450 | } |
---|
511 | 451 | EXPORT_SYMBOL(giveup_all); |
---|
512 | 452 | |
---|
513 | | -void restore_math(struct pt_regs *regs) |
---|
| 453 | +#ifdef CONFIG_PPC_BOOK3S_64 |
---|
| 454 | +#ifdef CONFIG_PPC_FPU |
---|
| 455 | +static bool should_restore_fp(void) |
---|
| 456 | +{ |
---|
| 457 | + if (current->thread.load_fp) { |
---|
| 458 | + current->thread.load_fp++; |
---|
| 459 | + return true; |
---|
| 460 | + } |
---|
| 461 | + return false; |
---|
| 462 | +} |
---|
| 463 | + |
---|
| 464 | +static void do_restore_fp(void) |
---|
| 465 | +{ |
---|
| 466 | + load_fp_state(&current->thread.fp_state); |
---|
| 467 | +} |
---|
| 468 | +#else |
---|
| 469 | +static bool should_restore_fp(void) { return false; } |
---|
| 470 | +static void do_restore_fp(void) { } |
---|
| 471 | +#endif /* CONFIG_PPC_FPU */ |
---|
| 472 | + |
---|
| 473 | +#ifdef CONFIG_ALTIVEC |
---|
| 474 | +static bool should_restore_altivec(void) |
---|
| 475 | +{ |
---|
| 476 | + if (cpu_has_feature(CPU_FTR_ALTIVEC) && (current->thread.load_vec)) { |
---|
| 477 | + current->thread.load_vec++; |
---|
| 478 | + return true; |
---|
| 479 | + } |
---|
| 480 | + return false; |
---|
| 481 | +} |
---|
| 482 | + |
---|
| 483 | +static void do_restore_altivec(void) |
---|
| 484 | +{ |
---|
| 485 | + load_vr_state(&current->thread.vr_state); |
---|
| 486 | + current->thread.used_vr = 1; |
---|
| 487 | +} |
---|
| 488 | +#else |
---|
| 489 | +static bool should_restore_altivec(void) { return false; } |
---|
| 490 | +static void do_restore_altivec(void) { } |
---|
| 491 | +#endif /* CONFIG_ALTIVEC */ |
---|
| 492 | + |
---|
| 493 | +static bool should_restore_vsx(void) |
---|
| 494 | +{ |
---|
| 495 | + if (cpu_has_feature(CPU_FTR_VSX)) |
---|
| 496 | + return true; |
---|
| 497 | + return false; |
---|
| 498 | +} |
---|
| 499 | +#ifdef CONFIG_VSX |
---|
| 500 | +static void do_restore_vsx(void) |
---|
| 501 | +{ |
---|
| 502 | + current->thread.used_vsr = 1; |
---|
| 503 | +} |
---|
| 504 | +#else |
---|
| 505 | +static void do_restore_vsx(void) { } |
---|
| 506 | +#endif /* CONFIG_VSX */ |
---|
| 507 | + |
---|
| 508 | +/* |
---|
| 509 | + * The exception exit path calls restore_math() with interrupts hard disabled |
---|
| 510 | + * but the soft irq state not "reconciled". ftrace code that calls |
---|
| 511 | + * local_irq_save/restore causes warnings. |
---|
| 512 | + * |
---|
| 513 | + * Rather than complicate the exit path, just don't trace restore_math. This |
---|
| 514 | + * could be done by having ftrace entry code check for this un-reconciled |
---|
| 515 | + * condition where MSR[EE]=0 and PACA_IRQ_HARD_DIS is not set, and |
---|
| 516 | + * temporarily fix it up for the duration of the ftrace call. |
---|
| 517 | + */ |
---|
| 518 | +void notrace restore_math(struct pt_regs *regs) |
---|
514 | 519 | { |
---|
515 | 520 | unsigned long msr; |
---|
516 | | - |
---|
517 | | - if (!MSR_TM_ACTIVE(regs->msr) && |
---|
518 | | - !current->thread.load_fp && !loadvec(current->thread)) |
---|
519 | | - return; |
---|
| 521 | + unsigned long new_msr = 0; |
---|
520 | 522 | |
---|
521 | 523 | msr = regs->msr; |
---|
522 | | - msr_check_and_set(msr_all_available); |
---|
523 | 524 | |
---|
524 | 525 | /* |
---|
525 | | - * Only reload if the bit is not set in the user MSR, the bit BEING set |
---|
526 | | - * indicates that the registers are hot |
---|
| 526 | + * new_msr tracks the facilities that are to be restored. Only reload |
---|
| 527 | + * if the bit is not set in the user MSR (if it is set, the registers |
---|
| 528 | + * are live for the user thread). |
---|
527 | 529 | */ |
---|
528 | | - if ((!(msr & MSR_FP)) && restore_fp(current)) |
---|
529 | | - msr |= MSR_FP | current->thread.fpexc_mode; |
---|
| 530 | + if ((!(msr & MSR_FP)) && should_restore_fp()) |
---|
| 531 | + new_msr |= MSR_FP; |
---|
530 | 532 | |
---|
531 | | - if ((!(msr & MSR_VEC)) && restore_altivec(current)) |
---|
532 | | - msr |= MSR_VEC; |
---|
| 533 | + if ((!(msr & MSR_VEC)) && should_restore_altivec()) |
---|
| 534 | + new_msr |= MSR_VEC; |
---|
533 | 535 | |
---|
534 | | - if ((msr & (MSR_FP | MSR_VEC)) == (MSR_FP | MSR_VEC) && |
---|
535 | | - restore_vsx(current)) { |
---|
536 | | - msr |= MSR_VSX; |
---|
| 536 | + if ((!(msr & MSR_VSX)) && should_restore_vsx()) { |
---|
| 537 | + if (((msr | new_msr) & (MSR_FP | MSR_VEC)) == (MSR_FP | MSR_VEC)) |
---|
| 538 | + new_msr |= MSR_VSX; |
---|
537 | 539 | } |
---|
538 | 540 | |
---|
539 | | - msr_check_and_clear(msr_all_available); |
---|
| 541 | + if (new_msr) { |
---|
| 542 | + unsigned long fpexc_mode = 0; |
---|
540 | 543 | |
---|
541 | | - regs->msr = msr; |
---|
| 544 | + msr_check_and_set(new_msr); |
---|
| 545 | + |
---|
| 546 | + if (new_msr & MSR_FP) { |
---|
| 547 | + do_restore_fp(); |
---|
| 548 | + |
---|
| 549 | + // This also covers VSX, because VSX implies FP |
---|
| 550 | + fpexc_mode = current->thread.fpexc_mode; |
---|
| 551 | + } |
---|
| 552 | + |
---|
| 553 | + if (new_msr & MSR_VEC) |
---|
| 554 | + do_restore_altivec(); |
---|
| 555 | + |
---|
| 556 | + if (new_msr & MSR_VSX) |
---|
| 557 | + do_restore_vsx(); |
---|
| 558 | + |
---|
| 559 | + msr_check_and_clear(new_msr); |
---|
| 560 | + |
---|
| 561 | + regs->msr |= new_msr | fpexc_mode; |
---|
| 562 | + } |
---|
542 | 563 | } |
---|
| 564 | +#endif /* CONFIG_PPC_BOOK3S_64 */ |
---|
543 | 565 | |
---|
544 | 566 | static void save_all(struct task_struct *tsk) |
---|
545 | 567 | { |
---|
.. | .. |
---|
600 | 622 | (void __user *)address); |
---|
601 | 623 | } |
---|
602 | 624 | #else /* !CONFIG_PPC_ADV_DEBUG_REGS */ |
---|
| 625 | + |
---|
| 626 | +static void do_break_handler(struct pt_regs *regs) |
---|
| 627 | +{ |
---|
| 628 | + struct arch_hw_breakpoint null_brk = {0}; |
---|
| 629 | + struct arch_hw_breakpoint *info; |
---|
| 630 | + struct ppc_inst instr = ppc_inst(0); |
---|
| 631 | + int type = 0; |
---|
| 632 | + int size = 0; |
---|
| 633 | + unsigned long ea; |
---|
| 634 | + int i; |
---|
| 635 | + |
---|
| 636 | + /* |
---|
| 637 | + * If underneath hw supports only one watchpoint, we know it |
---|
| 638 | + * caused exception. 8xx also falls into this category. |
---|
| 639 | + */ |
---|
| 640 | + if (nr_wp_slots() == 1) { |
---|
| 641 | + __set_breakpoint(0, &null_brk); |
---|
| 642 | + current->thread.hw_brk[0] = null_brk; |
---|
| 643 | + current->thread.hw_brk[0].flags |= HW_BRK_FLAG_DISABLED; |
---|
| 644 | + return; |
---|
| 645 | + } |
---|
| 646 | + |
---|
| 647 | + /* Otherwise find out which DAWR caused exception and disable it. */ |
---|
| 648 | + wp_get_instr_detail(regs, &instr, &type, &size, &ea); |
---|
| 649 | + |
---|
| 650 | + for (i = 0; i < nr_wp_slots(); i++) { |
---|
| 651 | + info = &current->thread.hw_brk[i]; |
---|
| 652 | + if (!info->address) |
---|
| 653 | + continue; |
---|
| 654 | + |
---|
| 655 | + if (wp_check_constraints(regs, instr, ea, type, size, info)) { |
---|
| 656 | + __set_breakpoint(i, &null_brk); |
---|
| 657 | + current->thread.hw_brk[i] = null_brk; |
---|
| 658 | + current->thread.hw_brk[i].flags |= HW_BRK_FLAG_DISABLED; |
---|
| 659 | + } |
---|
| 660 | + } |
---|
| 661 | +} |
---|
| 662 | + |
---|
603 | 663 | void do_break (struct pt_regs *regs, unsigned long address, |
---|
604 | 664 | unsigned long error_code) |
---|
605 | 665 | { |
---|
606 | | - siginfo_t info; |
---|
607 | | - |
---|
608 | 666 | current->thread.trap_nr = TRAP_HWBKPT; |
---|
609 | 667 | if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code, |
---|
610 | 668 | 11, SIGSEGV) == NOTIFY_STOP) |
---|
.. | .. |
---|
613 | 671 | if (debugger_break_match(regs)) |
---|
614 | 672 | return; |
---|
615 | 673 | |
---|
616 | | - /* Clear the breakpoint */ |
---|
617 | | - hw_breakpoint_disable(); |
---|
| 674 | + /* |
---|
| 675 | + * We reach here only when watchpoint exception is generated by ptrace |
---|
| 676 | + * event (or hw is buggy!). Now if CONFIG_HAVE_HW_BREAKPOINT is set, |
---|
| 677 | + * watchpoint is already handled by hw_breakpoint_handler() so we don't |
---|
| 678 | + * have to do anything. But when CONFIG_HAVE_HW_BREAKPOINT is not set, |
---|
| 679 | + * we need to manually handle the watchpoint here. |
---|
| 680 | + */ |
---|
| 681 | + if (!IS_ENABLED(CONFIG_HAVE_HW_BREAKPOINT)) |
---|
| 682 | + do_break_handler(regs); |
---|
618 | 683 | |
---|
619 | 684 | /* Deliver the signal to userspace */ |
---|
620 | | - clear_siginfo(&info); |
---|
621 | | - info.si_signo = SIGTRAP; |
---|
622 | | - info.si_errno = 0; |
---|
623 | | - info.si_code = TRAP_HWBKPT; |
---|
624 | | - info.si_addr = (void __user *)address; |
---|
625 | | - force_sig_info(SIGTRAP, &info, current); |
---|
| 685 | + force_sig_fault(SIGTRAP, TRAP_HWBKPT, (void __user *)address); |
---|
626 | 686 | } |
---|
627 | 687 | #endif /* CONFIG_PPC_ADV_DEBUG_REGS */ |
---|
628 | 688 | |
---|
629 | | -static DEFINE_PER_CPU(struct arch_hw_breakpoint, current_brk); |
---|
| 689 | +static DEFINE_PER_CPU(struct arch_hw_breakpoint, current_brk[HBP_NUM_MAX]); |
---|
630 | 690 | |
---|
631 | 691 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS |
---|
632 | 692 | /* |
---|
.. | .. |
---|
700 | 760 | EXPORT_SYMBOL_GPL(switch_booke_debug_regs); |
---|
701 | 761 | #else /* !CONFIG_PPC_ADV_DEBUG_REGS */ |
---|
702 | 762 | #ifndef CONFIG_HAVE_HW_BREAKPOINT |
---|
703 | | -static void set_breakpoint(struct arch_hw_breakpoint *brk) |
---|
| 763 | +static void set_breakpoint(int i, struct arch_hw_breakpoint *brk) |
---|
704 | 764 | { |
---|
705 | 765 | preempt_disable(); |
---|
706 | | - __set_breakpoint(brk); |
---|
| 766 | + __set_breakpoint(i, brk); |
---|
707 | 767 | preempt_enable(); |
---|
708 | 768 | } |
---|
709 | 769 | |
---|
710 | 770 | static void set_debug_reg_defaults(struct thread_struct *thread) |
---|
711 | 771 | { |
---|
712 | | - thread->hw_brk.address = 0; |
---|
713 | | - thread->hw_brk.type = 0; |
---|
714 | | - if (ppc_breakpoint_available()) |
---|
715 | | - set_breakpoint(&thread->hw_brk); |
---|
| 772 | + int i; |
---|
| 773 | + struct arch_hw_breakpoint null_brk = {0}; |
---|
| 774 | + |
---|
| 775 | + for (i = 0; i < nr_wp_slots(); i++) { |
---|
| 776 | + thread->hw_brk[i] = null_brk; |
---|
| 777 | + if (ppc_breakpoint_available()) |
---|
| 778 | + set_breakpoint(i, &thread->hw_brk[i]); |
---|
| 779 | + } |
---|
| 780 | +} |
---|
| 781 | + |
---|
| 782 | +static inline bool hw_brk_match(struct arch_hw_breakpoint *a, |
---|
| 783 | + struct arch_hw_breakpoint *b) |
---|
| 784 | +{ |
---|
| 785 | + if (a->address != b->address) |
---|
| 786 | + return false; |
---|
| 787 | + if (a->type != b->type) |
---|
| 788 | + return false; |
---|
| 789 | + if (a->len != b->len) |
---|
| 790 | + return false; |
---|
| 791 | + /* no need to check hw_len. it's calculated from address and len */ |
---|
| 792 | + return true; |
---|
| 793 | +} |
---|
| 794 | + |
---|
| 795 | +static void switch_hw_breakpoint(struct task_struct *new) |
---|
| 796 | +{ |
---|
| 797 | + int i; |
---|
| 798 | + |
---|
| 799 | + for (i = 0; i < nr_wp_slots(); i++) { |
---|
| 800 | + if (likely(hw_brk_match(this_cpu_ptr(&current_brk[i]), |
---|
| 801 | + &new->thread.hw_brk[i]))) |
---|
| 802 | + continue; |
---|
| 803 | + |
---|
| 804 | + __set_breakpoint(i, &new->thread.hw_brk[i]); |
---|
| 805 | + } |
---|
716 | 806 | } |
---|
717 | 807 | #endif /* !CONFIG_HAVE_HW_BREAKPOINT */ |
---|
718 | 808 | #endif /* CONFIG_PPC_ADV_DEBUG_REGS */ |
---|
.. | .. |
---|
721 | 811 | static inline int __set_dabr(unsigned long dabr, unsigned long dabrx) |
---|
722 | 812 | { |
---|
723 | 813 | mtspr(SPRN_DAC1, dabr); |
---|
724 | | -#ifdef CONFIG_PPC_47x |
---|
725 | | - isync(); |
---|
726 | | -#endif |
---|
| 814 | + if (IS_ENABLED(CONFIG_PPC_47x)) |
---|
| 815 | + isync(); |
---|
727 | 816 | return 0; |
---|
728 | 817 | } |
---|
729 | 818 | #elif defined(CONFIG_PPC_BOOK3S) |
---|
.. | .. |
---|
732 | 821 | mtspr(SPRN_DABR, dabr); |
---|
733 | 822 | if (cpu_has_feature(CPU_FTR_DABRX)) |
---|
734 | 823 | mtspr(SPRN_DABRX, dabrx); |
---|
735 | | - return 0; |
---|
736 | | -} |
---|
737 | | -#elif defined(CONFIG_PPC_8xx) |
---|
738 | | -static inline int __set_dabr(unsigned long dabr, unsigned long dabrx) |
---|
739 | | -{ |
---|
740 | | - unsigned long addr = dabr & ~HW_BRK_TYPE_DABR; |
---|
741 | | - unsigned long lctrl1 = 0x90000000; /* compare type: equal on E & F */ |
---|
742 | | - unsigned long lctrl2 = 0x8e000002; /* watchpoint 1 on cmp E | F */ |
---|
743 | | - |
---|
744 | | - if ((dabr & HW_BRK_TYPE_RDWR) == HW_BRK_TYPE_READ) |
---|
745 | | - lctrl1 |= 0xa0000; |
---|
746 | | - else if ((dabr & HW_BRK_TYPE_RDWR) == HW_BRK_TYPE_WRITE) |
---|
747 | | - lctrl1 |= 0xf0000; |
---|
748 | | - else if ((dabr & HW_BRK_TYPE_RDWR) == 0) |
---|
749 | | - lctrl2 = 0; |
---|
750 | | - |
---|
751 | | - mtspr(SPRN_LCTRL2, 0); |
---|
752 | | - mtspr(SPRN_CMPE, addr); |
---|
753 | | - mtspr(SPRN_CMPF, addr + 4); |
---|
754 | | - mtspr(SPRN_LCTRL1, lctrl1); |
---|
755 | | - mtspr(SPRN_LCTRL2, lctrl2); |
---|
756 | | - |
---|
757 | 824 | return 0; |
---|
758 | 825 | } |
---|
759 | 826 | #else |
---|
.. | .. |
---|
776 | 843 | return __set_dabr(dabr, dabrx); |
---|
777 | 844 | } |
---|
778 | 845 | |
---|
779 | | -static inline int set_dawr(struct arch_hw_breakpoint *brk) |
---|
| 846 | +static inline int set_breakpoint_8xx(struct arch_hw_breakpoint *brk) |
---|
780 | 847 | { |
---|
781 | | - unsigned long dawr, dawrx, mrd; |
---|
| 848 | + unsigned long lctrl1 = LCTRL1_CTE_GT | LCTRL1_CTF_LT | LCTRL1_CRWE_RW | |
---|
| 849 | + LCTRL1_CRWF_RW; |
---|
| 850 | + unsigned long lctrl2 = LCTRL2_LW0EN | LCTRL2_LW0LADC | LCTRL2_SLW0EN; |
---|
| 851 | + unsigned long start_addr = ALIGN_DOWN(brk->address, HW_BREAKPOINT_SIZE); |
---|
| 852 | + unsigned long end_addr = ALIGN(brk->address + brk->len, HW_BREAKPOINT_SIZE); |
---|
782 | 853 | |
---|
783 | | - dawr = brk->address; |
---|
| 854 | + if (start_addr == 0) |
---|
| 855 | + lctrl2 |= LCTRL2_LW0LA_F; |
---|
| 856 | + else if (end_addr == 0) |
---|
| 857 | + lctrl2 |= LCTRL2_LW0LA_E; |
---|
| 858 | + else |
---|
| 859 | + lctrl2 |= LCTRL2_LW0LA_EandF; |
---|
784 | 860 | |
---|
785 | | - dawrx = (brk->type & (HW_BRK_TYPE_READ | HW_BRK_TYPE_WRITE)) \ |
---|
786 | | - << (63 - 58); //* read/write bits */ |
---|
787 | | - dawrx |= ((brk->type & (HW_BRK_TYPE_TRANSLATE)) >> 2) \ |
---|
788 | | - << (63 - 59); //* translate */ |
---|
789 | | - dawrx |= (brk->type & (HW_BRK_TYPE_PRIV_ALL)) \ |
---|
790 | | - >> 3; //* PRIM bits */ |
---|
791 | | - /* dawr length is stored in field MDR bits 48:53. Matches range in |
---|
792 | | - doublewords (64 bits) baised by -1 eg. 0b000000=1DW and |
---|
793 | | - 0b111111=64DW. |
---|
794 | | - brk->len is in bytes. |
---|
795 | | - This aligns up to double word size, shifts and does the bias. |
---|
796 | | - */ |
---|
797 | | - mrd = ((brk->len + 7) >> 3) - 1; |
---|
798 | | - dawrx |= (mrd & 0x3f) << (63 - 53); |
---|
| 861 | + mtspr(SPRN_LCTRL2, 0); |
---|
799 | 862 | |
---|
800 | | - if (ppc_md.set_dawr) |
---|
801 | | - return ppc_md.set_dawr(dawr, dawrx); |
---|
802 | | - mtspr(SPRN_DAWR, dawr); |
---|
803 | | - mtspr(SPRN_DAWRX, dawrx); |
---|
| 863 | + if ((brk->type & HW_BRK_TYPE_RDWR) == 0) |
---|
| 864 | + return 0; |
---|
| 865 | + |
---|
| 866 | + if ((brk->type & HW_BRK_TYPE_RDWR) == HW_BRK_TYPE_READ) |
---|
| 867 | + lctrl1 |= LCTRL1_CRWE_RO | LCTRL1_CRWF_RO; |
---|
| 868 | + if ((brk->type & HW_BRK_TYPE_RDWR) == HW_BRK_TYPE_WRITE) |
---|
| 869 | + lctrl1 |= LCTRL1_CRWE_WO | LCTRL1_CRWF_WO; |
---|
| 870 | + |
---|
| 871 | + mtspr(SPRN_CMPE, start_addr - 1); |
---|
| 872 | + mtspr(SPRN_CMPF, end_addr); |
---|
| 873 | + mtspr(SPRN_LCTRL1, lctrl1); |
---|
| 874 | + mtspr(SPRN_LCTRL2, lctrl2); |
---|
| 875 | + |
---|
804 | 876 | return 0; |
---|
805 | 877 | } |
---|
806 | 878 | |
---|
807 | | -void __set_breakpoint(struct arch_hw_breakpoint *brk) |
---|
| 879 | +void __set_breakpoint(int nr, struct arch_hw_breakpoint *brk) |
---|
808 | 880 | { |
---|
809 | | - memcpy(this_cpu_ptr(&current_brk), brk, sizeof(*brk)); |
---|
| 881 | + memcpy(this_cpu_ptr(&current_brk[nr]), brk, sizeof(*brk)); |
---|
810 | 882 | |
---|
811 | | - if (cpu_has_feature(CPU_FTR_DAWR)) |
---|
| 883 | + if (dawr_enabled()) |
---|
812 | 884 | // Power8 or later |
---|
813 | | - set_dawr(brk); |
---|
| 885 | + set_dawr(nr, brk); |
---|
| 886 | + else if (IS_ENABLED(CONFIG_PPC_8xx)) |
---|
| 887 | + set_breakpoint_8xx(brk); |
---|
814 | 888 | else if (!cpu_has_feature(CPU_FTR_ARCH_207S)) |
---|
815 | 889 | // Power7 or earlier |
---|
816 | 890 | set_dabr(brk); |
---|
.. | .. |
---|
822 | 896 | /* Check if we have DAWR or DABR hardware */ |
---|
823 | 897 | bool ppc_breakpoint_available(void) |
---|
824 | 898 | { |
---|
825 | | - if (cpu_has_feature(CPU_FTR_DAWR)) |
---|
826 | | - return true; /* POWER8 DAWR */ |
---|
| 899 | + if (dawr_enabled()) |
---|
| 900 | + return true; /* POWER8 DAWR or POWER9 forced DAWR */ |
---|
827 | 901 | if (cpu_has_feature(CPU_FTR_ARCH_207S)) |
---|
828 | 902 | return false; /* POWER9 with DAWR disabled */ |
---|
829 | 903 | /* DABR: Everything but POWER8 and POWER9 */ |
---|
830 | 904 | return true; |
---|
831 | 905 | } |
---|
832 | 906 | EXPORT_SYMBOL_GPL(ppc_breakpoint_available); |
---|
833 | | - |
---|
834 | | -static inline bool hw_brk_match(struct arch_hw_breakpoint *a, |
---|
835 | | - struct arch_hw_breakpoint *b) |
---|
836 | | -{ |
---|
837 | | - if (a->address != b->address) |
---|
838 | | - return false; |
---|
839 | | - if (a->type != b->type) |
---|
840 | | - return false; |
---|
841 | | - if (a->len != b->len) |
---|
842 | | - return false; |
---|
843 | | - return true; |
---|
844 | | -} |
---|
845 | 907 | |
---|
846 | 908 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM |
---|
847 | 909 | |
---|
.. | .. |
---|
1143 | 1205 | thread_pkey_regs_restore(new_thread, old_thread); |
---|
1144 | 1206 | } |
---|
1145 | 1207 | |
---|
1146 | | -#ifdef CONFIG_PPC_BOOK3S_64 |
---|
1147 | | -#define CP_SIZE 128 |
---|
1148 | | -static const u8 dummy_copy_buffer[CP_SIZE] __attribute__((aligned(CP_SIZE))); |
---|
1149 | | -#endif |
---|
1150 | | - |
---|
1151 | 1208 | struct task_struct *__switch_to(struct task_struct *prev, |
---|
1152 | 1209 | struct task_struct *new) |
---|
1153 | 1210 | { |
---|
.. | .. |
---|
1170 | 1227 | __flush_tlb_pending(batch); |
---|
1171 | 1228 | batch->active = 0; |
---|
1172 | 1229 | } |
---|
| 1230 | + |
---|
| 1231 | + /* |
---|
| 1232 | + * On POWER9 the copy-paste buffer can only paste into |
---|
| 1233 | + * foreign real addresses, so unprivileged processes can not |
---|
| 1234 | + * see the data or use it in any way unless they have |
---|
| 1235 | + * foreign real mappings. If the new process has the foreign |
---|
| 1236 | + * real address mappings, we must issue a cp_abort to clear |
---|
| 1237 | + * any state and prevent snooping, corruption or a covert |
---|
| 1238 | + * channel. ISA v3.1 supports paste into local memory. |
---|
| 1239 | + */ |
---|
| 1240 | + if (new->mm && (cpu_has_feature(CPU_FTR_ARCH_31) || |
---|
| 1241 | + atomic_read(&new->mm->context.vas_windows))) |
---|
| 1242 | + asm volatile(PPC_CP_ABORT); |
---|
1173 | 1243 | #endif /* CONFIG_PPC_BOOK3S_64 */ |
---|
1174 | 1244 | |
---|
1175 | 1245 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS |
---|
.. | .. |
---|
1180 | 1250 | * schedule DABR |
---|
1181 | 1251 | */ |
---|
1182 | 1252 | #ifndef CONFIG_HAVE_HW_BREAKPOINT |
---|
1183 | | - if (unlikely(!hw_brk_match(this_cpu_ptr(&current_brk), &new->thread.hw_brk))) |
---|
1184 | | - __set_breakpoint(&new->thread.hw_brk); |
---|
| 1253 | + switch_hw_breakpoint(new); |
---|
1185 | 1254 | #endif /* CONFIG_HAVE_HW_BREAKPOINT */ |
---|
1186 | 1255 | #endif |
---|
1187 | 1256 | |
---|
.. | .. |
---|
1216 | 1285 | |
---|
1217 | 1286 | last = _switch(old_thread, new_thread); |
---|
1218 | 1287 | |
---|
| 1288 | + /* |
---|
| 1289 | + * Nothing after _switch will be run for newly created tasks, |
---|
| 1290 | + * because they switch directly to ret_from_fork/ret_from_kernel_thread |
---|
| 1291 | + * etc. Code added here should have a comment explaining why that is |
---|
| 1292 | + * okay. |
---|
| 1293 | + */ |
---|
| 1294 | + |
---|
1219 | 1295 | #ifdef CONFIG_PPC_BOOK3S_64 |
---|
| 1296 | + /* |
---|
| 1297 | + * This applies to a process that was context switched while inside |
---|
| 1298 | + * arch_enter_lazy_mmu_mode(), to re-activate the batch that was |
---|
| 1299 | + * deactivated above, before _switch(). This will never be the case |
---|
| 1300 | + * for new tasks. |
---|
| 1301 | + */ |
---|
1220 | 1302 | if (current_thread_info()->local_flags & _TLF_LAZY_MMU) { |
---|
1221 | 1303 | current_thread_info()->local_flags &= ~_TLF_LAZY_MMU; |
---|
1222 | 1304 | batch = this_cpu_ptr(&ppc64_tlb_batch); |
---|
1223 | 1305 | batch->active = 1; |
---|
1224 | 1306 | } |
---|
1225 | 1307 | |
---|
1226 | | - if (current_thread_info()->task->thread.regs) { |
---|
1227 | | - restore_math(current_thread_info()->task->thread.regs); |
---|
1228 | | - |
---|
1229 | | - /* |
---|
1230 | | - * The copy-paste buffer can only store into foreign real |
---|
1231 | | - * addresses, so unprivileged processes can not see the |
---|
1232 | | - * data or use it in any way unless they have foreign real |
---|
1233 | | - * mappings. If the new process has the foreign real address |
---|
1234 | | - * mappings, we must issue a cp_abort to clear any state and |
---|
1235 | | - * prevent snooping, corruption or a covert channel. |
---|
1236 | | - */ |
---|
1237 | | - if (current_thread_info()->task->thread.used_vas) |
---|
1238 | | - asm volatile(PPC_CP_ABORT); |
---|
1239 | | - } |
---|
| 1308 | + /* |
---|
| 1309 | + * Math facilities are masked out of the child MSR in copy_thread. |
---|
| 1310 | + * A new task does not need to restore_math because it will |
---|
| 1311 | + * demand fault them. |
---|
| 1312 | + */ |
---|
| 1313 | + if (current->thread.regs) |
---|
| 1314 | + restore_math(current->thread.regs); |
---|
1240 | 1315 | #endif /* CONFIG_PPC_BOOK3S_64 */ |
---|
1241 | 1316 | |
---|
1242 | 1317 | return last; |
---|
1243 | 1318 | } |
---|
1244 | 1319 | |
---|
1245 | | -static int instructions_to_print = 16; |
---|
| 1320 | +#define NR_INSN_TO_PRINT 16 |
---|
1246 | 1321 | |
---|
1247 | 1322 | static void show_instructions(struct pt_regs *regs) |
---|
1248 | 1323 | { |
---|
1249 | 1324 | int i; |
---|
1250 | | - unsigned long pc = regs->nip - (instructions_to_print * 3 / 4 * |
---|
1251 | | - sizeof(int)); |
---|
| 1325 | + unsigned long nip = regs->nip; |
---|
| 1326 | + unsigned long pc = regs->nip - (NR_INSN_TO_PRINT * 3 / 4 * sizeof(int)); |
---|
1252 | 1327 | |
---|
1253 | 1328 | printk("Instruction dump:"); |
---|
1254 | 1329 | |
---|
1255 | | - for (i = 0; i < instructions_to_print; i++) { |
---|
| 1330 | + /* |
---|
| 1331 | + * If we were executing with the MMU off for instructions, adjust pc |
---|
| 1332 | + * rather than printing XXXXXXXX. |
---|
| 1333 | + */ |
---|
| 1334 | + if (!IS_ENABLED(CONFIG_BOOKE) && !(regs->msr & MSR_IR)) { |
---|
| 1335 | + pc = (unsigned long)phys_to_virt(pc); |
---|
| 1336 | + nip = (unsigned long)phys_to_virt(regs->nip); |
---|
| 1337 | + } |
---|
| 1338 | + |
---|
| 1339 | + for (i = 0; i < NR_INSN_TO_PRINT; i++) { |
---|
1256 | 1340 | int instr; |
---|
1257 | 1341 | |
---|
1258 | 1342 | if (!(i % 8)) |
---|
1259 | 1343 | pr_cont("\n"); |
---|
1260 | 1344 | |
---|
1261 | | -#if !defined(CONFIG_BOOKE) |
---|
1262 | | - /* If executing with the IMMU off, adjust pc rather |
---|
1263 | | - * than print XXXXXXXX. |
---|
1264 | | - */ |
---|
1265 | | - if (!(regs->msr & MSR_IR)) |
---|
1266 | | - pc = (unsigned long)phys_to_virt(pc); |
---|
1267 | | -#endif |
---|
1268 | | - |
---|
1269 | 1345 | if (!__kernel_text_address(pc) || |
---|
1270 | | - probe_kernel_address((unsigned int __user *)pc, instr)) { |
---|
| 1346 | + get_kernel_nofault(instr, (const void *)pc)) { |
---|
1271 | 1347 | pr_cont("XXXXXXXX "); |
---|
1272 | 1348 | } else { |
---|
1273 | | - if (regs->nip == pc) |
---|
| 1349 | + if (nip == pc) |
---|
1274 | 1350 | pr_cont("<%08x> ", instr); |
---|
1275 | 1351 | else |
---|
1276 | 1352 | pr_cont("%08x ", instr); |
---|
.. | .. |
---|
1285 | 1361 | void show_user_instructions(struct pt_regs *regs) |
---|
1286 | 1362 | { |
---|
1287 | 1363 | unsigned long pc; |
---|
1288 | | - int i; |
---|
| 1364 | + int n = NR_INSN_TO_PRINT; |
---|
| 1365 | + struct seq_buf s; |
---|
| 1366 | + char buf[96]; /* enough for 8 times 9 + 2 chars */ |
---|
1289 | 1367 | |
---|
1290 | | - pc = regs->nip - (instructions_to_print * 3 / 4 * sizeof(int)); |
---|
| 1368 | + pc = regs->nip - (NR_INSN_TO_PRINT * 3 / 4 * sizeof(int)); |
---|
1291 | 1369 | |
---|
1292 | | - /* |
---|
1293 | | - * Make sure the NIP points at userspace, not kernel text/data or |
---|
1294 | | - * elsewhere. |
---|
1295 | | - */ |
---|
1296 | | - if (!__access_ok(pc, instructions_to_print * sizeof(int), USER_DS)) { |
---|
1297 | | - pr_info("%s[%d]: Bad NIP, not dumping instructions.\n", |
---|
1298 | | - current->comm, current->pid); |
---|
1299 | | - return; |
---|
1300 | | - } |
---|
| 1370 | + seq_buf_init(&s, buf, sizeof(buf)); |
---|
1301 | 1371 | |
---|
1302 | | - pr_info("%s[%d]: code: ", current->comm, current->pid); |
---|
| 1372 | + while (n) { |
---|
| 1373 | + int i; |
---|
1303 | 1374 | |
---|
1304 | | - for (i = 0; i < instructions_to_print; i++) { |
---|
1305 | | - int instr; |
---|
| 1375 | + seq_buf_clear(&s); |
---|
1306 | 1376 | |
---|
1307 | | - if (!(i % 8) && (i > 0)) { |
---|
1308 | | - pr_cont("\n"); |
---|
1309 | | - pr_info("%s[%d]: code: ", current->comm, current->pid); |
---|
| 1377 | + for (i = 0; i < 8 && n; i++, n--, pc += sizeof(int)) { |
---|
| 1378 | + int instr; |
---|
| 1379 | + |
---|
| 1380 | + if (copy_from_user_nofault(&instr, (void __user *)pc, |
---|
| 1381 | + sizeof(instr))) { |
---|
| 1382 | + seq_buf_printf(&s, "XXXXXXXX "); |
---|
| 1383 | + continue; |
---|
| 1384 | + } |
---|
| 1385 | + seq_buf_printf(&s, regs->nip == pc ? "<%08x> " : "%08x ", instr); |
---|
1310 | 1386 | } |
---|
1311 | 1387 | |
---|
1312 | | - if (probe_kernel_address((unsigned int __user *)pc, instr)) { |
---|
1313 | | - pr_cont("XXXXXXXX "); |
---|
1314 | | - } else { |
---|
1315 | | - if (regs->nip == pc) |
---|
1316 | | - pr_cont("<%08x> ", instr); |
---|
1317 | | - else |
---|
1318 | | - pr_cont("%08x ", instr); |
---|
1319 | | - } |
---|
1320 | | - |
---|
1321 | | - pc += sizeof(int); |
---|
| 1388 | + if (!seq_buf_has_overflowed(&s)) |
---|
| 1389 | + pr_info("%s[%d]: code: %s\n", current->comm, |
---|
| 1390 | + current->pid, s.buffer); |
---|
1322 | 1391 | } |
---|
1323 | | - |
---|
1324 | | - pr_cont("\n"); |
---|
1325 | 1392 | } |
---|
1326 | 1393 | |
---|
1327 | 1394 | struct regbit { |
---|
.. | .. |
---|
1429 | 1496 | print_msr_bits(regs->msr); |
---|
1430 | 1497 | pr_cont(" CR: %08lx XER: %08lx\n", regs->ccr, regs->xer); |
---|
1431 | 1498 | trap = TRAP(regs); |
---|
1432 | | - if ((TRAP(regs) != 0xc00) && cpu_has_feature(CPU_FTR_CFAR)) |
---|
| 1499 | + if (!trap_is_syscall(regs) && cpu_has_feature(CPU_FTR_CFAR)) |
---|
1433 | 1500 | pr_cont("CFAR: "REG" ", regs->orig_gpr3); |
---|
1434 | | - if (trap == 0x200 || trap == 0x300 || trap == 0x600) |
---|
1435 | | -#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE) |
---|
1436 | | - pr_cont("DEAR: "REG" ESR: "REG" ", regs->dar, regs->dsisr); |
---|
1437 | | -#else |
---|
1438 | | - pr_cont("DAR: "REG" DSISR: %08lx ", regs->dar, regs->dsisr); |
---|
1439 | | -#endif |
---|
| 1501 | + if (trap == 0x200 || trap == 0x300 || trap == 0x600) { |
---|
| 1502 | + if (IS_ENABLED(CONFIG_4xx) || IS_ENABLED(CONFIG_BOOKE)) |
---|
| 1503 | + pr_cont("DEAR: "REG" ESR: "REG" ", regs->dar, regs->dsisr); |
---|
| 1504 | + else |
---|
| 1505 | + pr_cont("DAR: "REG" DSISR: %08lx ", regs->dar, regs->dsisr); |
---|
| 1506 | + } |
---|
| 1507 | + |
---|
1440 | 1508 | #ifdef CONFIG_PPC64 |
---|
1441 | 1509 | pr_cont("IRQMASK: %lx ", regs->softe); |
---|
1442 | 1510 | #endif |
---|
.. | .. |
---|
1453 | 1521 | break; |
---|
1454 | 1522 | } |
---|
1455 | 1523 | pr_cont("\n"); |
---|
1456 | | -#ifdef CONFIG_KALLSYMS |
---|
1457 | 1524 | /* |
---|
1458 | 1525 | * Lookup NIP late so we have the best change of getting the |
---|
1459 | 1526 | * above info out without failing |
---|
1460 | 1527 | */ |
---|
1461 | | - printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip); |
---|
1462 | | - printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link); |
---|
1463 | | -#endif |
---|
1464 | | - show_stack(current, (unsigned long *) regs->gpr[1]); |
---|
| 1528 | + if (IS_ENABLED(CONFIG_KALLSYMS)) { |
---|
| 1529 | + printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip); |
---|
| 1530 | + printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link); |
---|
| 1531 | + } |
---|
| 1532 | + show_stack(current, (unsigned long *) regs->gpr[1], KERN_DEFAULT); |
---|
1465 | 1533 | if (!user_mode(regs)) |
---|
1466 | 1534 | show_instructions(regs); |
---|
1467 | 1535 | } |
---|
.. | .. |
---|
1475 | 1543 | #endif /* CONFIG_HAVE_HW_BREAKPOINT */ |
---|
1476 | 1544 | } |
---|
1477 | 1545 | |
---|
1478 | | -int set_thread_uses_vas(void) |
---|
1479 | | -{ |
---|
1480 | 1546 | #ifdef CONFIG_PPC_BOOK3S_64 |
---|
1481 | | - if (!cpu_has_feature(CPU_FTR_ARCH_300)) |
---|
1482 | | - return -EINVAL; |
---|
1483 | | - |
---|
1484 | | - current->thread.used_vas = 1; |
---|
1485 | | - |
---|
1486 | | - /* |
---|
1487 | | - * Even a process that has no foreign real address mapping can use |
---|
1488 | | - * an unpaired COPY instruction (to no real effect). Issue CP_ABORT |
---|
1489 | | - * to clear any pending COPY and prevent a covert channel. |
---|
1490 | | - * |
---|
1491 | | - * __switch_to() will issue CP_ABORT on future context switches. |
---|
1492 | | - */ |
---|
1493 | | - asm volatile(PPC_CP_ABORT); |
---|
1494 | | - |
---|
1495 | | -#endif /* CONFIG_PPC_BOOK3S_64 */ |
---|
1496 | | - return 0; |
---|
| 1547 | +void arch_setup_new_exec(void) |
---|
| 1548 | +{ |
---|
| 1549 | + if (radix_enabled()) |
---|
| 1550 | + return; |
---|
| 1551 | + hash__setup_new_exec(); |
---|
1497 | 1552 | } |
---|
| 1553 | +#endif |
---|
1498 | 1554 | |
---|
1499 | 1555 | #ifdef CONFIG_PPC64 |
---|
1500 | 1556 | /** |
---|
.. | .. |
---|
1609 | 1665 | * Copy architecture-specific thread state |
---|
1610 | 1666 | */ |
---|
1611 | 1667 | int copy_thread(unsigned long clone_flags, unsigned long usp, |
---|
1612 | | - unsigned long kthread_arg, struct task_struct *p) |
---|
| 1668 | + unsigned long kthread_arg, struct task_struct *p, |
---|
| 1669 | + unsigned long tls) |
---|
1613 | 1670 | { |
---|
1614 | 1671 | struct pt_regs *childregs, *kregs; |
---|
1615 | 1672 | extern void ret_from_fork(void); |
---|
| 1673 | + extern void ret_from_fork_scv(void); |
---|
1616 | 1674 | extern void ret_from_kernel_thread(void); |
---|
1617 | 1675 | void (*f)(void); |
---|
1618 | 1676 | unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE; |
---|
1619 | 1677 | struct thread_info *ti = task_thread_info(p); |
---|
| 1678 | +#ifdef CONFIG_HAVE_HW_BREAKPOINT |
---|
| 1679 | + int i; |
---|
| 1680 | +#endif |
---|
1620 | 1681 | |
---|
1621 | | - klp_init_thread_info(ti); |
---|
| 1682 | + klp_init_thread_info(p); |
---|
1622 | 1683 | |
---|
1623 | 1684 | /* Copy registers */ |
---|
1624 | 1685 | sp -= sizeof(struct pt_regs); |
---|
1625 | 1686 | childregs = (struct pt_regs *) sp; |
---|
1626 | | - if (unlikely(p->flags & PF_KTHREAD)) { |
---|
| 1687 | + if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) { |
---|
1627 | 1688 | /* kernel thread */ |
---|
1628 | 1689 | memset(childregs, 0, sizeof(struct pt_regs)); |
---|
1629 | 1690 | childregs->gpr[1] = sp + sizeof(struct pt_regs); |
---|
.. | .. |
---|
1646 | 1707 | if (usp) |
---|
1647 | 1708 | childregs->gpr[1] = usp; |
---|
1648 | 1709 | p->thread.regs = childregs; |
---|
1649 | | - childregs->gpr[3] = 0; /* Result from fork() */ |
---|
| 1710 | + /* 64s sets this in ret_from_fork */ |
---|
| 1711 | + if (!IS_ENABLED(CONFIG_PPC_BOOK3S_64)) |
---|
| 1712 | + childregs->gpr[3] = 0; /* Result from fork() */ |
---|
1650 | 1713 | if (clone_flags & CLONE_SETTLS) { |
---|
1651 | | -#ifdef CONFIG_PPC64 |
---|
1652 | 1714 | if (!is_32bit_task()) |
---|
1653 | | - childregs->gpr[13] = childregs->gpr[6]; |
---|
| 1715 | + childregs->gpr[13] = tls; |
---|
1654 | 1716 | else |
---|
1655 | | -#endif |
---|
1656 | | - childregs->gpr[2] = childregs->gpr[6]; |
---|
| 1717 | + childregs->gpr[2] = tls; |
---|
1657 | 1718 | } |
---|
1658 | 1719 | |
---|
1659 | | - f = ret_from_fork; |
---|
| 1720 | + if (trap_is_scv(regs)) |
---|
| 1721 | + f = ret_from_fork_scv; |
---|
| 1722 | + else |
---|
| 1723 | + f = ret_from_fork; |
---|
1660 | 1724 | } |
---|
1661 | 1725 | childregs->msr &= ~(MSR_FP|MSR_VEC|MSR_VSX); |
---|
1662 | 1726 | sp -= STACK_FRAME_OVERHEAD; |
---|
.. | .. |
---|
1675 | 1739 | sp -= STACK_FRAME_OVERHEAD; |
---|
1676 | 1740 | p->thread.ksp = sp; |
---|
1677 | 1741 | #ifdef CONFIG_PPC32 |
---|
1678 | | - p->thread.ksp_limit = (unsigned long)task_stack_page(p) + |
---|
1679 | | - _ALIGN_UP(sizeof(struct thread_info), 16); |
---|
| 1742 | + p->thread.ksp_limit = (unsigned long)end_of_stack(p); |
---|
1680 | 1743 | #endif |
---|
1681 | 1744 | #ifdef CONFIG_HAVE_HW_BREAKPOINT |
---|
1682 | | - p->thread.ptrace_bps[0] = NULL; |
---|
| 1745 | + for (i = 0; i < nr_wp_slots(); i++) |
---|
| 1746 | + p->thread.ptrace_bps[i] = NULL; |
---|
1683 | 1747 | #endif |
---|
1684 | 1748 | |
---|
1685 | 1749 | p->thread.fp_save_area = NULL; |
---|
.. | .. |
---|
1695 | 1759 | p->thread.dscr = mfspr(SPRN_DSCR); |
---|
1696 | 1760 | } |
---|
1697 | 1761 | if (cpu_has_feature(CPU_FTR_HAS_PPR)) |
---|
1698 | | - p->thread.ppr = INIT_PPR; |
---|
| 1762 | + childregs->ppr = DEFAULT_PPR; |
---|
1699 | 1763 | |
---|
1700 | 1764 | p->thread.tidr = 0; |
---|
1701 | 1765 | #endif |
---|
1702 | 1766 | kregs->nip = ppc_function_entry(f); |
---|
1703 | 1767 | return 0; |
---|
1704 | 1768 | } |
---|
| 1769 | + |
---|
| 1770 | +void preload_new_slb_context(unsigned long start, unsigned long sp); |
---|
1705 | 1771 | |
---|
1706 | 1772 | /* |
---|
1707 | 1773 | * Set up a thread for executing a new program |
---|
.. | .. |
---|
1710 | 1776 | { |
---|
1711 | 1777 | #ifdef CONFIG_PPC64 |
---|
1712 | 1778 | unsigned long load_addr = regs->gpr[2]; /* saved by ELF_PLAT_INIT */ |
---|
| 1779 | + |
---|
| 1780 | + if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && !radix_enabled()) |
---|
| 1781 | + preload_new_slb_context(start, sp); |
---|
1713 | 1782 | #endif |
---|
1714 | 1783 | |
---|
1715 | 1784 | /* |
---|
.. | .. |
---|
1731 | 1800 | tm_reclaim_current(0); |
---|
1732 | 1801 | #endif |
---|
1733 | 1802 | |
---|
1734 | | - memset(regs->gpr, 0, sizeof(regs->gpr)); |
---|
| 1803 | + memset(®s->gpr[1], 0, sizeof(regs->gpr) - sizeof(regs->gpr[0])); |
---|
1735 | 1804 | regs->ctr = 0; |
---|
1736 | 1805 | regs->link = 0; |
---|
1737 | 1806 | regs->xer = 0; |
---|
.. | .. |
---|
1743 | 1812 | * FULL_REGS(regs) return true. This is necessary to allow |
---|
1744 | 1813 | * ptrace to examine the thread immediately after exec. |
---|
1745 | 1814 | */ |
---|
1746 | | - regs->trap &= ~1UL; |
---|
| 1815 | + SET_FULL_REGS(regs); |
---|
1747 | 1816 | |
---|
1748 | 1817 | #ifdef CONFIG_PPC32 |
---|
1749 | 1818 | regs->mq = 0; |
---|
.. | .. |
---|
1800 | 1869 | #ifdef CONFIG_VSX |
---|
1801 | 1870 | current->thread.used_vsr = 0; |
---|
1802 | 1871 | #endif |
---|
| 1872 | + current->thread.load_slb = 0; |
---|
1803 | 1873 | current->thread.load_fp = 0; |
---|
1804 | 1874 | memset(¤t->thread.fp_state, 0, sizeof(current->thread.fp_state)); |
---|
1805 | 1875 | current->thread.fp_save_area = NULL; |
---|
.. | .. |
---|
1840 | 1910 | * fpexc_mode. fpexc_mode is also used for setting FP exception |
---|
1841 | 1911 | * mode (asyn, precise, disabled) for 'Classic' FP. */ |
---|
1842 | 1912 | if (val & PR_FP_EXC_SW_ENABLE) { |
---|
1843 | | -#ifdef CONFIG_SPE |
---|
1844 | 1913 | if (cpu_has_feature(CPU_FTR_SPE)) { |
---|
1845 | 1914 | /* |
---|
1846 | 1915 | * When the sticky exception bits are set |
---|
.. | .. |
---|
1854 | 1923 | * anyway to restore the prctl settings from |
---|
1855 | 1924 | * the saved environment. |
---|
1856 | 1925 | */ |
---|
| 1926 | +#ifdef CONFIG_SPE |
---|
1857 | 1927 | tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR); |
---|
1858 | 1928 | tsk->thread.fpexc_mode = val & |
---|
1859 | 1929 | (PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT); |
---|
| 1930 | +#endif |
---|
1860 | 1931 | return 0; |
---|
1861 | 1932 | } else { |
---|
1862 | 1933 | return -EINVAL; |
---|
1863 | 1934 | } |
---|
1864 | | -#else |
---|
1865 | | - return -EINVAL; |
---|
1866 | | -#endif |
---|
1867 | 1935 | } |
---|
1868 | 1936 | |
---|
1869 | 1937 | /* on a CONFIG_SPE this does not hurt us. The bits that |
---|
.. | .. |
---|
1882 | 1950 | |
---|
1883 | 1951 | int get_fpexc_mode(struct task_struct *tsk, unsigned long adr) |
---|
1884 | 1952 | { |
---|
1885 | | - unsigned int val; |
---|
| 1953 | + unsigned int val = 0; |
---|
1886 | 1954 | |
---|
1887 | | - if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE) |
---|
1888 | | -#ifdef CONFIG_SPE |
---|
| 1955 | + if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE) { |
---|
1889 | 1956 | if (cpu_has_feature(CPU_FTR_SPE)) { |
---|
1890 | 1957 | /* |
---|
1891 | 1958 | * When the sticky exception bits are set |
---|
.. | .. |
---|
1899 | 1966 | * anyway to restore the prctl settings from |
---|
1900 | 1967 | * the saved environment. |
---|
1901 | 1968 | */ |
---|
| 1969 | +#ifdef CONFIG_SPE |
---|
1902 | 1970 | tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR); |
---|
1903 | 1971 | val = tsk->thread.fpexc_mode; |
---|
| 1972 | +#endif |
---|
1904 | 1973 | } else |
---|
1905 | 1974 | return -EINVAL; |
---|
1906 | | -#else |
---|
1907 | | - return -EINVAL; |
---|
1908 | | -#endif |
---|
1909 | | - else |
---|
| 1975 | + } else { |
---|
1910 | 1976 | val = __unpack_fe01(tsk->thread.fpexc_mode); |
---|
| 1977 | + } |
---|
1911 | 1978 | return put_user(val, (unsigned int __user *) adr); |
---|
1912 | 1979 | } |
---|
1913 | 1980 | |
---|
.. | .. |
---|
1972 | 2039 | unsigned long stack_page; |
---|
1973 | 2040 | unsigned long cpu = task_cpu(p); |
---|
1974 | 2041 | |
---|
1975 | | - /* |
---|
1976 | | - * Avoid crashing if the stack has overflowed and corrupted |
---|
1977 | | - * task_cpu(p), which is in the thread_info struct. |
---|
1978 | | - */ |
---|
1979 | | - if (cpu < NR_CPUS && cpu_possible(cpu)) { |
---|
1980 | | - stack_page = (unsigned long) hardirq_ctx[cpu]; |
---|
1981 | | - if (sp >= stack_page + sizeof(struct thread_struct) |
---|
1982 | | - && sp <= stack_page + THREAD_SIZE - nbytes) |
---|
1983 | | - return 1; |
---|
| 2042 | + stack_page = (unsigned long)hardirq_ctx[cpu]; |
---|
| 2043 | + if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes) |
---|
| 2044 | + return 1; |
---|
1984 | 2045 | |
---|
1985 | | - stack_page = (unsigned long) softirq_ctx[cpu]; |
---|
1986 | | - if (sp >= stack_page + sizeof(struct thread_struct) |
---|
1987 | | - && sp <= stack_page + THREAD_SIZE - nbytes) |
---|
1988 | | - return 1; |
---|
1989 | | - } |
---|
| 2046 | + stack_page = (unsigned long)softirq_ctx[cpu]; |
---|
| 2047 | + if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes) |
---|
| 2048 | + return 1; |
---|
| 2049 | + |
---|
1990 | 2050 | return 0; |
---|
1991 | 2051 | } |
---|
| 2052 | + |
---|
| 2053 | +static inline int valid_emergency_stack(unsigned long sp, struct task_struct *p, |
---|
| 2054 | + unsigned long nbytes) |
---|
| 2055 | +{ |
---|
| 2056 | +#ifdef CONFIG_PPC64 |
---|
| 2057 | + unsigned long stack_page; |
---|
| 2058 | + unsigned long cpu = task_cpu(p); |
---|
| 2059 | + |
---|
| 2060 | + stack_page = (unsigned long)paca_ptrs[cpu]->emergency_sp - THREAD_SIZE; |
---|
| 2061 | + if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes) |
---|
| 2062 | + return 1; |
---|
| 2063 | + |
---|
| 2064 | +# ifdef CONFIG_PPC_BOOK3S_64 |
---|
| 2065 | + stack_page = (unsigned long)paca_ptrs[cpu]->nmi_emergency_sp - THREAD_SIZE; |
---|
| 2066 | + if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes) |
---|
| 2067 | + return 1; |
---|
| 2068 | + |
---|
| 2069 | + stack_page = (unsigned long)paca_ptrs[cpu]->mc_emergency_sp - THREAD_SIZE; |
---|
| 2070 | + if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes) |
---|
| 2071 | + return 1; |
---|
| 2072 | +# endif |
---|
| 2073 | +#endif |
---|
| 2074 | + |
---|
| 2075 | + return 0; |
---|
| 2076 | +} |
---|
| 2077 | + |
---|
1992 | 2078 | |
---|
1993 | 2079 | int validate_sp(unsigned long sp, struct task_struct *p, |
---|
1994 | 2080 | unsigned long nbytes) |
---|
1995 | 2081 | { |
---|
1996 | 2082 | unsigned long stack_page = (unsigned long)task_stack_page(p); |
---|
1997 | 2083 | |
---|
1998 | | - if (sp >= stack_page + sizeof(struct thread_struct) |
---|
1999 | | - && sp <= stack_page + THREAD_SIZE - nbytes) |
---|
| 2084 | + if (sp < THREAD_SIZE) |
---|
| 2085 | + return 0; |
---|
| 2086 | + |
---|
| 2087 | + if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes) |
---|
2000 | 2088 | return 1; |
---|
2001 | 2089 | |
---|
2002 | | - return valid_irq_stack(sp, p, nbytes); |
---|
| 2090 | + if (valid_irq_stack(sp, p, nbytes)) |
---|
| 2091 | + return 1; |
---|
| 2092 | + |
---|
| 2093 | + return valid_emergency_stack(sp, p, nbytes); |
---|
2003 | 2094 | } |
---|
2004 | 2095 | |
---|
2005 | 2096 | EXPORT_SYMBOL(validate_sp); |
---|
2006 | 2097 | |
---|
2007 | | -unsigned long get_wchan(struct task_struct *p) |
---|
| 2098 | +static unsigned long __get_wchan(struct task_struct *p) |
---|
2008 | 2099 | { |
---|
2009 | 2100 | unsigned long ip, sp; |
---|
2010 | 2101 | int count = 0; |
---|
.. | .. |
---|
2017 | 2108 | return 0; |
---|
2018 | 2109 | |
---|
2019 | 2110 | do { |
---|
2020 | | - sp = *(unsigned long *)sp; |
---|
| 2111 | + sp = READ_ONCE_NOCHECK(*(unsigned long *)sp); |
---|
2021 | 2112 | if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD) || |
---|
2022 | 2113 | p->state == TASK_RUNNING) |
---|
2023 | 2114 | return 0; |
---|
2024 | 2115 | if (count > 0) { |
---|
2025 | | - ip = ((unsigned long *)sp)[STACK_FRAME_LR_SAVE]; |
---|
| 2116 | + ip = READ_ONCE_NOCHECK(((unsigned long *)sp)[STACK_FRAME_LR_SAVE]); |
---|
2026 | 2117 | if (!in_sched_functions(ip)) |
---|
2027 | 2118 | return ip; |
---|
2028 | 2119 | } |
---|
.. | .. |
---|
2030 | 2121 | return 0; |
---|
2031 | 2122 | } |
---|
2032 | 2123 | |
---|
| 2124 | +unsigned long get_wchan(struct task_struct *p) |
---|
| 2125 | +{ |
---|
| 2126 | + unsigned long ret; |
---|
| 2127 | + |
---|
| 2128 | + if (!try_get_task_stack(p)) |
---|
| 2129 | + return 0; |
---|
| 2130 | + |
---|
| 2131 | + ret = __get_wchan(p); |
---|
| 2132 | + |
---|
| 2133 | + put_task_stack(p); |
---|
| 2134 | + |
---|
| 2135 | + return ret; |
---|
| 2136 | +} |
---|
| 2137 | + |
---|
2033 | 2138 | static int kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH; |
---|
2034 | 2139 | |
---|
2035 | | -void show_stack(struct task_struct *tsk, unsigned long *stack) |
---|
| 2140 | +void show_stack(struct task_struct *tsk, unsigned long *stack, |
---|
| 2141 | + const char *loglvl) |
---|
2036 | 2142 | { |
---|
2037 | 2143 | unsigned long sp, ip, lr, newsp; |
---|
2038 | 2144 | int count = 0; |
---|
2039 | 2145 | int firstframe = 1; |
---|
2040 | | -#ifdef CONFIG_FUNCTION_GRAPH_TRACER |
---|
2041 | | - int curr_frame = current->curr_ret_stack; |
---|
2042 | | - extern void return_to_handler(void); |
---|
2043 | | - unsigned long rth = (unsigned long)return_to_handler; |
---|
2044 | | -#endif |
---|
| 2146 | + unsigned long ret_addr; |
---|
| 2147 | + int ftrace_idx = 0; |
---|
2045 | 2148 | |
---|
2046 | | - sp = (unsigned long) stack; |
---|
2047 | 2149 | if (tsk == NULL) |
---|
2048 | 2150 | tsk = current; |
---|
| 2151 | + |
---|
| 2152 | + if (!try_get_task_stack(tsk)) |
---|
| 2153 | + return; |
---|
| 2154 | + |
---|
| 2155 | + sp = (unsigned long) stack; |
---|
2049 | 2156 | if (sp == 0) { |
---|
2050 | 2157 | if (tsk == current) |
---|
2051 | | - sp = current_stack_pointer(); |
---|
| 2158 | + sp = current_stack_frame(); |
---|
2052 | 2159 | else |
---|
2053 | 2160 | sp = tsk->thread.ksp; |
---|
2054 | 2161 | } |
---|
2055 | 2162 | |
---|
2056 | 2163 | lr = 0; |
---|
2057 | | - printk("Call Trace:\n"); |
---|
| 2164 | + printk("%sCall Trace:\n", loglvl); |
---|
2058 | 2165 | do { |
---|
2059 | 2166 | if (!validate_sp(sp, tsk, STACK_FRAME_OVERHEAD)) |
---|
2060 | | - return; |
---|
| 2167 | + break; |
---|
2061 | 2168 | |
---|
2062 | 2169 | stack = (unsigned long *) sp; |
---|
2063 | 2170 | newsp = stack[0]; |
---|
2064 | 2171 | ip = stack[STACK_FRAME_LR_SAVE]; |
---|
2065 | 2172 | if (!firstframe || ip != lr) { |
---|
2066 | | - printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip); |
---|
2067 | | -#ifdef CONFIG_FUNCTION_GRAPH_TRACER |
---|
2068 | | - if ((ip == rth) && curr_frame >= 0) { |
---|
2069 | | - pr_cont(" (%pS)", |
---|
2070 | | - (void *)current->ret_stack[curr_frame].ret); |
---|
2071 | | - curr_frame--; |
---|
2072 | | - } |
---|
2073 | | -#endif |
---|
| 2173 | + printk("%s["REG"] ["REG"] %pS", |
---|
| 2174 | + loglvl, sp, ip, (void *)ip); |
---|
| 2175 | + ret_addr = ftrace_graph_ret_addr(current, |
---|
| 2176 | + &ftrace_idx, ip, stack); |
---|
| 2177 | + if (ret_addr != ip) |
---|
| 2178 | + pr_cont(" (%pS)", (void *)ret_addr); |
---|
2074 | 2179 | if (firstframe) |
---|
2075 | 2180 | pr_cont(" (unreliable)"); |
---|
2076 | 2181 | pr_cont("\n"); |
---|
.. | .. |
---|
2081 | 2186 | * See if this is an exception frame. |
---|
2082 | 2187 | * We look for the "regshere" marker in the current frame. |
---|
2083 | 2188 | */ |
---|
2084 | | - if (validate_sp(sp, tsk, STACK_INT_FRAME_SIZE) |
---|
| 2189 | + if (validate_sp(sp, tsk, STACK_FRAME_WITH_PT_REGS) |
---|
2085 | 2190 | && stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) { |
---|
2086 | 2191 | struct pt_regs *regs = (struct pt_regs *) |
---|
2087 | 2192 | (sp + STACK_FRAME_OVERHEAD); |
---|
2088 | 2193 | lr = regs->link; |
---|
2089 | | - printk("--- interrupt: %lx at %pS\n LR = %pS\n", |
---|
2090 | | - regs->trap, (void *)regs->nip, (void *)lr); |
---|
| 2194 | + printk("%s--- interrupt: %lx at %pS\n LR = %pS\n", |
---|
| 2195 | + loglvl, regs->trap, |
---|
| 2196 | + (void *)regs->nip, (void *)lr); |
---|
2091 | 2197 | firstframe = 1; |
---|
2092 | 2198 | } |
---|
2093 | 2199 | |
---|
2094 | 2200 | sp = newsp; |
---|
2095 | 2201 | } while (count++ < kstack_depth_to_print); |
---|
| 2202 | + |
---|
| 2203 | + put_task_stack(tsk); |
---|
2096 | 2204 | } |
---|
2097 | 2205 | |
---|
2098 | 2206 | #ifdef CONFIG_PPC64 |
---|