```
 .. |  .. |
 27 |  27 | #include <asm/irq_regs.h>
 28 |  28 | #include <linux/kvm_para.h>
 29 |  29 |
     |  30 | +#include <trace/hooks/softlockup.h>
     |  31 | +
     |  32 | +#if IS_ENABLED(CONFIG_ROCKCHIP_MINIDUMP)
     |  33 | +#include <soc/rockchip/rk_minidump.h>
     |  34 | +#endif
     |  35 | +
 30 |  36 | static DEFINE_MUTEX(watchdog_mutex);
 31 |  37 |
 32 |  38 | #if defined(CONFIG_HARDLOCKUP_DETECTOR) || defined(CONFIG_HAVE_NMI_WATCHDOG)
```
```
 .. |  .. |
 42 |  48 | int __read_mostly nmi_watchdog_user_enabled = NMI_WATCHDOG_DEFAULT;
 43 |  49 | int __read_mostly soft_watchdog_user_enabled = 1;
 44 |  50 | int __read_mostly watchdog_thresh = 10;
 45 |     | -int __read_mostly nmi_watchdog_available;
 46 |     | -
 47 |     | -struct cpumask watchdog_allowed_mask __read_mostly;
     |  51 | +static int __read_mostly nmi_watchdog_available;
 48 |  52 |
 49 |  53 | struct cpumask watchdog_cpumask __read_mostly;
 50 |  54 | unsigned long *watchdog_cpumask_bits = cpumask_bits(&watchdog_cpumask);
 51 |  55 |
 52 |  56 | #ifdef CONFIG_HARDLOCKUP_DETECTOR
     |  57 | +
     |  58 | +# ifdef CONFIG_SMP
     |  59 | +int __read_mostly sysctl_hardlockup_all_cpu_backtrace;
     |  60 | +# endif /* CONFIG_SMP */
 53 |  61 |
 54 |  62 | ATOMIC_NOTIFIER_HEAD(hardlock_notifier_list);
 55 |  63 |
```
```
 .. |  .. |
 85 |  93 | }
 86 |  94 | __setup("nmi_watchdog=", hardlockup_panic_setup);
 87 |  95 |
 88 |     | -# ifdef CONFIG_SMP
 89 |     | -int __read_mostly sysctl_hardlockup_all_cpu_backtrace;
 90 |     | -
 91 |     | -static int __init hardlockup_all_cpu_backtrace_setup(char *str)
 92 |     | -{
 93 |     | -        sysctl_hardlockup_all_cpu_backtrace = !!simple_strtol(str, NULL, 0);
 94 |     | -        return 1;
 95 |     | -}
 96 |     | -__setup("hardlockup_all_cpu_backtrace=", hardlockup_all_cpu_backtrace_setup);
 97 |     | -# endif /* CONFIG_SMP */
 98 |  96 | #endif /* CONFIG_HARDLOCKUP_DETECTOR */
 99 |  97 |
100 |  98 | #ifdef CONFIG_HARDLOCKUP_DETECTOR_OTHER_CPU
```
```
 .. |  .. |
206 | 204 |
207 | 205 | #define SOFTLOCKUP_RESET        ULONG_MAX
208 | 206 |
     | 207 | +#ifdef CONFIG_SMP
     | 208 | +int __read_mostly sysctl_softlockup_all_cpu_backtrace;
     | 209 | +#endif
     | 210 | +
     | 211 | +static struct cpumask watchdog_allowed_mask __read_mostly;
     | 212 | +
209 | 213 | /* Global variables, exported for sysctl */
210 | 214 | unsigned int __read_mostly softlockup_panic =
211 | 215 |                 CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;
```
```
 .. |  .. |
216 | 220 | static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
217 | 221 | static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer);
218 | 222 | static DEFINE_PER_CPU(bool, softlockup_touch_sync);
219 |     | -static DEFINE_PER_CPU(bool, soft_watchdog_warn);
220 | 223 | static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts);
221 |     | -static DEFINE_PER_CPU(unsigned long, soft_lockup_hrtimer_cnt);
222 |     | -static DEFINE_PER_CPU(struct task_struct *, softlockup_task_ptr_saved);
223 | 224 | static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
224 | 225 | static unsigned long soft_lockup_nmi_warn;
225 |     | -
226 |     | -static int __init softlockup_panic_setup(char *str)
227 |     | -{
228 |     | -        softlockup_panic = simple_strtoul(str, NULL, 0);
229 |     | -        return 1;
230 |     | -}
231 |     | -__setup("softlockup_panic=", softlockup_panic_setup);
232 | 226 |
233 | 227 | static int __init nowatchdog_setup(char *str)
234 | 228 | {
```
```
 .. |  .. |
244 | 238 | }
245 | 239 | __setup("nosoftlockup", nosoftlockup_setup);
246 | 240 |
247 |     | -#ifdef CONFIG_SMP
248 |     | -int __read_mostly sysctl_softlockup_all_cpu_backtrace;
249 |     | -
250 |     | -static int __init softlockup_all_cpu_backtrace_setup(char *str)
     | 241 | +static int __init watchdog_thresh_setup(char *str)
251 | 242 | {
252 |     | -        sysctl_softlockup_all_cpu_backtrace = !!simple_strtol(str, NULL, 0);
     | 243 | +        get_option(&str, &watchdog_thresh);
253 | 244 |         return 1;
254 | 245 | }
255 |     | -__setup("softlockup_all_cpu_backtrace=", softlockup_all_cpu_backtrace_setup);
256 |     | -#endif
     | 246 | +__setup("watchdog_thresh=", watchdog_thresh_setup);
257 | 247 |
258 | 248 | static void __lockup_detector_cleanup(void);
259 | 249 |
```
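This hunk drops the old `simple_strtol()`-based `__setup()` handler for `softlockup_all_cpu_backtrace=` and adds a `watchdog_thresh=` boot parameter parsed with `get_option()`, which advances the cursor and reports whether an integer was found. Below is a runnable userspace sketch of that parsing flow; `get_option_demo()` is a hypothetical stand-in for the kernel helper, which additionally recognizes comma lists and ranges.

```c
/*
 * Userspace sketch of the get_option()-style parsing used by the new
 * watchdog_thresh_setup(). get_option_demo() is a hypothetical stand-in
 * for the kernel's get_option().
 */
#include <stdio.h>
#include <stdlib.h>

static int watchdog_thresh = 10;        /* default, as in the driver */

static int get_option_demo(char **str, int *pint)
{
        char *end;
        long v = strtol(*str, &end, 0);

        if (end == *str)
                return 0;       /* no integer found, leave *pint alone */
        *str = end;             /* advance past the number */
        *pint = (int)v;
        return 1;
}

int main(void)
{
        char *arg = "30";       /* the value after "watchdog_thresh=" */

        if (get_option_demo(&arg, &watchdog_thresh))
                printf("watchdog_thresh = %d\n", watchdog_thresh);
        return 0;
}
```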
```
 .. |  .. |
293 | 283 | }
294 | 284 |
295 | 285 | /* Commands for resetting the watchdog */
296 |     | -static void __touch_watchdog(void)
     | 286 | +static void update_touch_ts(void)
297 | 287 | {
298 | 288 |         __this_cpu_write(watchdog_touch_ts, get_timestamp());
299 | 289 | }
```
```
 .. |  .. |
426 | 416 |                 if (per_cpu(hard_watchdog_warn, next_cpu) == true)
427 | 417 |                         return;
428 | 418 |
429 |     | -                atomic_notifier_call_chain(&hardlock_notifier_list, 0, NULL);
     | 419 | +                atomic_notifier_call_chain(&hardlock_notifier_list, next_cpu, NULL);
430 | 420 |
431 | 421 |                 if (hardlockup_panic)
432 | 422 |                         panic("Watchdog detected hard LOCKUP on cpu %u", next_cpu);
```
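With this change the hard-lockup notifier chain passes the stalled CPU number as the notifier `action` argument instead of a constant 0, so subscribers can tell which CPU hard-locked. A kernel-context sketch (not a standalone program; the subscriber names are hypothetical) of how a listener would consume it:

```c
/* Hypothetical subscriber to hardlock_notifier_list: after this patch,
 * the 'action' argument carries the CPU that hard-locked. */
static int my_hardlock_notify(struct notifier_block *nb,
                              unsigned long cpu, void *unused)
{
        pr_emerg("hard lockup observed on CPU %lu\n", cpu);
        return NOTIFY_OK;
}

static struct notifier_block my_hardlock_nb = {
        .notifier_call = my_hardlock_notify,
};

/* registered once at init time:
 *      atomic_notifier_chain_register(&hardlock_notifier_list,
 *                                     &my_hardlock_nb);
 */
```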
```
 .. |  .. |
460 | 450 |  */
461 | 451 | static int softlockup_fn(void *data)
462 | 452 | {
463 |     | -        __this_cpu_write(soft_lockup_hrtimer_cnt,
464 |     | -                         __this_cpu_read(hrtimer_interrupts));
465 |     | -        __touch_watchdog();
     | 453 | +        update_touch_ts();
466 | 454 |         complete(this_cpu_ptr(&softlockup_completion));
467 | 455 |
468 | 456 |         return 0;
```
```
 .. |  .. |
509 | 497 |
510 | 498 |         /* Clear the guest paused flag on watchdog reset */
511 | 499 |         kvm_check_and_clear_guest_paused();
512 |     | -        __touch_watchdog();
     | 500 | +        update_touch_ts();
513 | 501 |         return HRTIMER_RESTART;
514 | 502 | }
515 | 503 |
```
```
 .. |  .. |
529 | 517 |                 if (kvm_check_and_clear_guest_paused())
530 | 518 |                         return HRTIMER_RESTART;
531 | 519 |
532 |     | -                /* only warn once */
533 |     | -                if (__this_cpu_read(soft_watchdog_warn) == true) {
534 |     | -                        /*
535 |     | -                         * When multiple processes are causing softlockups the
536 |     | -                         * softlockup detector only warns on the first one
537 |     | -                         * because the code relies on a full quiet cycle to
538 |     | -                         * re-arm. The second process prevents the quiet cycle
539 |     | -                         * and never gets reported. Use task pointers to detect
540 |     | -                         * this.
541 |     | -                         */
542 |     | -                        if (__this_cpu_read(softlockup_task_ptr_saved) !=
543 |     | -                            current) {
544 |     | -                                __this_cpu_write(soft_watchdog_warn, false);
545 |     | -                                __touch_watchdog();
546 |     | -                        }
547 |     | -                        return HRTIMER_RESTART;
     | 520 | +                /*
     | 521 | +                 * Prevent multiple soft-lockup reports if one cpu is already
     | 522 | +                 * engaged in dumping all cpu back traces.
     | 523 | +                 */
     | 524 | +                if (softlockup_all_cpu_backtrace) {
     | 525 | +                        if (test_and_set_bit_lock(0, &soft_lockup_nmi_warn))
     | 526 | +                                return HRTIMER_RESTART;
548 | 527 |                 }
549 | 528 |
550 |     | -                if (softlockup_all_cpu_backtrace) {
551 |     | -                        /* Prevent multiple soft-lockup reports if one cpu is already
552 |     | -                         * engaged in dumping cpu back traces
553 |     | -                         */
554 |     | -                        if (test_and_set_bit(0, &soft_lockup_nmi_warn)) {
555 |     | -                                /* Someone else will report us. Let's give up */
556 |     | -                                __this_cpu_write(soft_watchdog_warn, true);
557 |     | -                                return HRTIMER_RESTART;
558 |     | -                        }
559 |     | -                }
     | 529 | +                /* Start period for the next softlockup warning. */
     | 530 | +                update_touch_ts();
560 | 531 |
561 | 532 |                 pr_emerg("BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
562 | 533 |                         smp_processor_id(), duration,
563 | 534 |                         current->comm, task_pid_nr(current));
564 |     | -                __this_cpu_write(softlockup_task_ptr_saved, current);
565 | 535 |                 print_modules();
566 | 536 |                 print_irqtrace_events(current);
567 | 537 |                 if (regs)
 .. |  .. |
570 | 540 |                         dump_stack();
571 | 541 |
572 | 542 |                 if (softlockup_all_cpu_backtrace) {
573 |     | -                        /* Avoid generating two back traces for current
574 |     | -                         * given that one is already made above
575 |     | -                         */
576 | 543 |                         trigger_allbutself_cpu_backtrace();
577 |     | -
578 |     | -                        clear_bit(0, &soft_lockup_nmi_warn);
579 |     | -                        /* Barrier to sync with other cpus */
580 |     | -                        smp_mb__after_atomic();
     | 544 | +                        clear_bit_unlock(0, &soft_lockup_nmi_warn);
581 | 545 |                 }
582 | 546 |
     | 547 | +                trace_android_vh_watchdog_timer_softlockup(duration, regs, !!softlockup_panic);
583 | 548 |                 add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
     | 549 | +#if IS_ENABLED(CONFIG_ROCKCHIP_MINIDUMP)
     | 550 | +                rk_minidump_update_cpu_regs(regs);
     | 551 | +#endif
584 | 552 |                 if (softlockup_panic)
585 | 553 |                         panic("softlockup: hung tasks");
586 |     | -                __this_cpu_write(soft_watchdog_warn, true);
587 |     | -        } else
588 |     | -                __this_cpu_write(soft_watchdog_warn, false);
     | 554 | +        }
589 | 555 |
590 | 556 |         return HRTIMER_RESTART;
591 | 557 | }
```
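The rewritten warning path above removes the per-CPU `soft_watchdog_warn`/`softlockup_task_ptr_saved` bookkeeping: instead of suppressing repeat warnings until a quiet cycle re-arms the detector, it restarts the sample window with `update_touch_ts()` after every report. Serializing the all-CPU backtrace dump now relies on `test_and_set_bit_lock()`/`clear_bit_unlock()`, whose acquire/release ordering replaces the explicit `smp_mb__after_atomic()` barrier. A runnable userspace analogue of that lock-bit pattern, using C11 atomics (names hypothetical):

```c
/* Userspace analogue of the test_and_set_bit_lock()/clear_bit_unlock()
 * pattern that lets only one CPU at a time report and dump backtraces. */
#include <stdatomic.h>
#include <stdio.h>

static atomic_flag report_lock = ATOMIC_FLAG_INIT;

static void report_lockup(int cpu)
{
        /* acquire half: only the first contender proceeds; a concurrent
         * caller sees the flag already set and gives up immediately */
        if (atomic_flag_test_and_set_explicit(&report_lock,
                                              memory_order_acquire))
                return;

        printf("BUG: soft lockup - CPU#%d\n", cpu);

        /* release half: publish the report and re-arm for the next one */
        atomic_flag_clear_explicit(&report_lock, memory_order_release);
}

int main(void)
{
        report_lockup(0);
        report_lockup(1);       /* reports too, since the first released */
        return 0;
}
```

Only the first contender sees the flag clear and reports; anyone arriving mid-dump bails out, and the release store re-arms reporting once the dump finishes.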
```
 .. |  .. |
607 | 573 |         hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
608 | 574 |         hrtimer->function = watchdog_timer_fn;
609 | 575 |         hrtimer_start(hrtimer, ns_to_ktime(sample_period),
610 |     | -                      HRTIMER_MODE_REL_PINNED);
     | 576 | +                      HRTIMER_MODE_REL_PINNED_HARD);
611 | 577 |
612 | 578 |         /* Initialize timestamp */
613 |     | -        __touch_watchdog();
     | 579 | +        update_touch_ts();
614 | 580 |         /* Enable the perf event */
615 | 581 |         if (watchdog_enabled & NMI_WATCHDOG_ENABLED)
616 | 582 |                 watchdog_nmi_enable(cpu);
```
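Because the timer is initialized with `HRTIMER_MODE_REL_HARD`, it must also be started in a `_HARD` mode: the hrtimer core expects the init and start modes to agree on hard-interrupt expiry, which PREEMPT_RT relies on for the watchdog. The paired calls, as a minimal kernel-context sketch:

```c
/* Kernel-context sketch: both calls must agree on _HARD expiry. */
hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
hrtimer->function = watchdog_timer_fn;
hrtimer_start(hrtimer, ns_to_ktime(sample_period),
              HRTIMER_MODE_REL_PINNED_HARD);    /* pinned to this CPU */
```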
```
 .. |  .. |
682 | 648 |         return 0;
683 | 649 | }
684 | 650 |
685 |     | -static void lockup_detector_reconfigure(void)
     | 651 | +static void __lockup_detector_reconfigure(void)
686 | 652 | {
687 | 653 |         cpus_read_lock();
688 | 654 |         watchdog_nmi_stop();
```
```
 .. |  .. |
702 | 668 |         __lockup_detector_cleanup();
703 | 669 | }
704 | 670 |
     | 671 | +void lockup_detector_reconfigure(void)
     | 672 | +{
     | 673 | +        mutex_lock(&watchdog_mutex);
     | 674 | +        __lockup_detector_reconfigure();
     | 675 | +        mutex_unlock(&watchdog_mutex);
     | 676 | +}
     | 677 | +
705 | 678 | /*
706 | 679 |  * Create the watchdog thread infrastructure and configure the detector(s).
707 | 680 |  *
708 | 681 |  * The threads are not unparked as watchdog_allowed_mask is empty. When
709 |     | - * the threads are sucessfully initialized, take the proper locks and
     | 682 | + * the threads are successfully initialized, take the proper locks and
710 | 683 |  * unpark the threads in the watchdog_cpumask if the watchdog is enabled.
711 | 684 |  */
712 | 685 | static __init void lockup_detector_setup(void)
```
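The hunk above splits reconfiguration into a locked public entry point and an unlocked internal helper: external callers use `lockup_detector_reconfigure()`, which takes `watchdog_mutex` itself, while paths that already hold the mutex call `__lockup_detector_reconfigure()` directly. The same convention in miniature (a sketch with hypothetical names):

```c
/* Locked-wrapper convention: __do_reconfig() assumes cfg_mutex is held;
 * do_reconfig() is the entry point for everyone else. */
static DEFINE_MUTEX(cfg_mutex);

static void __do_reconfig(void)
{
        /* ... real work, cfg_mutex held by the caller ... */
}

void do_reconfig(void)
{
        mutex_lock(&cfg_mutex);
        __do_reconfig();
        mutex_unlock(&cfg_mutex);
}
```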
```
 .. |  .. |
722 | 695 |                 return;
723 | 696 |
724 | 697 |         mutex_lock(&watchdog_mutex);
725 |     | -        lockup_detector_reconfigure();
     | 698 | +        __lockup_detector_reconfigure();
726 | 699 |         softlockup_initialized = true;
727 | 700 |         mutex_unlock(&watchdog_mutex);
728 | 701 | }
729 | 702 |
730 | 703 | #else /* CONFIG_SOFTLOCKUP_DETECTOR */
731 |     | -static void lockup_detector_reconfigure(void)
     | 704 | +static void __lockup_detector_reconfigure(void)
732 | 705 | {
733 | 706 |         cpus_read_lock();
734 | 707 |         watchdog_nmi_stop();
```
```
 .. |  .. |
736 | 709 |         watchdog_nmi_start();
737 | 710 |         cpus_read_unlock();
738 | 711 | }
     | 712 | +void lockup_detector_reconfigure(void)
     | 713 | +{
     | 714 | +        __lockup_detector_reconfigure();
     | 715 | +}
739 | 716 | static inline void lockup_detector_setup(void)
740 | 717 | {
741 |     | -        lockup_detector_reconfigure();
     | 718 | +        __lockup_detector_reconfigure();
742 | 719 | }
743 | 720 | #endif /* !CONFIG_SOFTLOCKUP_DETECTOR */
744 | 721 |
```
```
 .. |  .. |
778 | 755 | {
779 | 756 |         /* Remove impossible cpus to keep sysctl output clean. */
780 | 757 |         cpumask_and(&watchdog_cpumask, &watchdog_cpumask, cpu_possible_mask);
781 |     | -        lockup_detector_reconfigure();
     | 758 | +        __lockup_detector_reconfigure();
782 | 759 | }
783 | 760 |
784 | 761 | /*
```
```
 .. |  .. |
794 | 771 |  * proc_soft_watchdog | soft_watchdog_user_enabled | SOFT_WATCHDOG_ENABLED
795 | 772 |  */
796 | 773 | static int proc_watchdog_common(int which, struct ctl_table *table, int write,
797 |     | -                                void __user *buffer, size_t *lenp, loff_t *ppos)
     | 774 | +                                void *buffer, size_t *lenp, loff_t *ppos)
798 | 775 | {
799 | 776 |         int err, old, *param = table->data;
800 | 777 |
```
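This and the remaining hunks make one mechanical change: the `buffer` argument of each `->proc_handler` implementation loses its `__user` annotation. This tracks the mainline sysctl rework in which the core copies data between user and kernel space around the handler call, so handlers receive a plain kernel pointer. The resulting handler shape, for reference:

```c
/* Handlers now take a kernel pointer; the sysctl core performs the
 * user-space copies on their behalf. */
int proc_watchdog(struct ctl_table *table, int write,
                  void *buffer, size_t *lenp, loff_t *ppos);
```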
```
 .. |  .. |
821 | 798 |  * /proc/sys/kernel/watchdog
822 | 799 |  */
823 | 800 | int proc_watchdog(struct ctl_table *table, int write,
824 |     | -                  void __user *buffer, size_t *lenp, loff_t *ppos)
     | 801 | +                  void *buffer, size_t *lenp, loff_t *ppos)
825 | 802 | {
826 | 803 |         return proc_watchdog_common(NMI_WATCHDOG_ENABLED|SOFT_WATCHDOG_ENABLED,
827 | 804 |                                     table, write, buffer, lenp, ppos);
```
```
 .. |  .. |
831 | 808 |  * /proc/sys/kernel/nmi_watchdog
832 | 809 |  */
833 | 810 | int proc_nmi_watchdog(struct ctl_table *table, int write,
834 |     | -                      void __user *buffer, size_t *lenp, loff_t *ppos)
     | 811 | +                      void *buffer, size_t *lenp, loff_t *ppos)
835 | 812 | {
836 | 813 |         if (!nmi_watchdog_available && write)
837 | 814 |                 return -ENOTSUPP;
```
```
 .. |  .. |
843 | 820 |  * /proc/sys/kernel/soft_watchdog
844 | 821 |  */
845 | 822 | int proc_soft_watchdog(struct ctl_table *table, int write,
846 |     | -                       void __user *buffer, size_t *lenp, loff_t *ppos)
     | 823 | +                       void *buffer, size_t *lenp, loff_t *ppos)
847 | 824 | {
848 | 825 |         return proc_watchdog_common(SOFT_WATCHDOG_ENABLED,
849 | 826 |                                     table, write, buffer, lenp, ppos);
```
```
 .. |  .. |
853 | 830 |  * /proc/sys/kernel/watchdog_thresh
854 | 831 |  */
855 | 832 | int proc_watchdog_thresh(struct ctl_table *table, int write,
856 |     | -                         void __user *buffer, size_t *lenp, loff_t *ppos)
     | 833 | +                         void *buffer, size_t *lenp, loff_t *ppos)
857 | 834 | {
858 | 835 |         int err, old;
859 | 836 |
```
```
 .. |  .. |
876 | 853 |  * been brought online, if desired.
877 | 854 |  */
878 | 855 | int proc_watchdog_cpumask(struct ctl_table *table, int write,
879 |     | -                          void __user *buffer, size_t *lenp, loff_t *ppos)
     | 856 | +                          void *buffer, size_t *lenp, loff_t *ppos)
880 | 857 | {
881 | 858 |         int err;
882 | 859 |
```