.. | ..
27 | 27 | #include <asm/irq_regs.h>
28 | 28 | #include <linux/kvm_para.h>
29 | 29 |
| 30 | +#include <trace/hooks/softlockup.h>
| 31 | +
30 | 32 | static DEFINE_MUTEX(watchdog_mutex);
31 | 33 |
32 | 34 | #if defined(CONFIG_HARDLOCKUP_DETECTOR) || defined(CONFIG_HAVE_NMI_WATCHDOG)
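Review note: the new include pairs with the `trace_android_vh_watchdog_timer_softlockup()` call added near the end of `watchdog_timer_fn()` below. The header itself is not part of this diff; assuming it follows the usual Android vendor-hook pattern, its declaration would look roughly like this (argument names are guessed from the call site):

```c
/* Sketch of the assumed contents of <trace/hooks/softlockup.h>.
 * Only the call site later in this diff is confirmed; the TP_PROTO
 * argument names here are illustrative. */
#include <trace/hooks/vendor_hooks.h>

DECLARE_HOOK(android_vh_watchdog_timer_softlockup,
	TP_PROTO(int duration, struct pt_regs *regs, bool is_panic),
	TP_ARGS(duration, regs, is_panic));
```

A vendor module would attach via the generated `register_trace_android_vh_watchdog_timer_softlockup()`; with no handler registered the hook is effectively a no-op.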
.. | ..
42 | 44 | int __read_mostly nmi_watchdog_user_enabled = NMI_WATCHDOG_DEFAULT;
43 | 45 | int __read_mostly soft_watchdog_user_enabled = 1;
44 | 46 | int __read_mostly watchdog_thresh = 10;
45 | | -int __read_mostly nmi_watchdog_available;
46 | | -
47 | | -struct cpumask watchdog_allowed_mask __read_mostly;
| 47 | +static int __read_mostly nmi_watchdog_available;
48 | 48 |
49 | 49 | struct cpumask watchdog_cpumask __read_mostly;
50 | 50 | unsigned long *watchdog_cpumask_bits = cpumask_bits(&watchdog_cpumask);
51 | 51 |
52 | 52 | #ifdef CONFIG_HARDLOCKUP_DETECTOR
| 53 | +
| 54 | +# ifdef CONFIG_SMP
| 55 | +int __read_mostly sysctl_hardlockup_all_cpu_backtrace;
| 56 | +# endif /* CONFIG_SMP */
53 | 57 |
54 | 58 | ATOMIC_NOTIFIER_HEAD(hardlock_notifier_list);
55 | 59 |
.. | ..
85 | 89 | }
86 | 90 | __setup("nmi_watchdog=", hardlockup_panic_setup);
87 | 91 |
88 | | -# ifdef CONFIG_SMP
89 | | -int __read_mostly sysctl_hardlockup_all_cpu_backtrace;
90 | | -
91 | | -static int __init hardlockup_all_cpu_backtrace_setup(char *str)
92 | | -{
93 | | - sysctl_hardlockup_all_cpu_backtrace = !!simple_strtol(str, NULL, 0);
94 | | - return 1;
95 | | -}
96 | | -__setup("hardlockup_all_cpu_backtrace=", hardlockup_all_cpu_backtrace_setup);
97 | | -# endif /* CONFIG_SMP */
98 | 92 | #endif /* CONFIG_HARDLOCKUP_DETECTOR */
99 | 93 |
100 | 94 | #ifdef CONFIG_HARDLOCKUP_DETECTOR_OTHER_CPU
.. | ..
206 | 200 |
207 | 201 | #define SOFTLOCKUP_RESET ULONG_MAX
208 | 202 |
| 203 | +#ifdef CONFIG_SMP
| 204 | +int __read_mostly sysctl_softlockup_all_cpu_backtrace;
| 205 | +#endif
| 206 | +
| 207 | +static struct cpumask watchdog_allowed_mask __read_mostly;
| 208 | +
209 | 209 | /* Global variables, exported for sysctl */
210 | 210 | unsigned int __read_mostly softlockup_panic =
211 | 211 | CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;
.. | ..
216 | 216 | static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
217 | 217 | static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer);
218 | 218 | static DEFINE_PER_CPU(bool, softlockup_touch_sync);
219 | | -static DEFINE_PER_CPU(bool, soft_watchdog_warn);
220 | 219 | static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts);
221 | | -static DEFINE_PER_CPU(unsigned long, soft_lockup_hrtimer_cnt);
222 | | -static DEFINE_PER_CPU(struct task_struct *, softlockup_task_ptr_saved);
223 | 220 | static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
224 | 221 | static unsigned long soft_lockup_nmi_warn;
225 | | -
226 | | -static int __init softlockup_panic_setup(char *str)
227 | | -{
228 | | - softlockup_panic = simple_strtoul(str, NULL, 0);
229 | | - return 1;
230 | | -}
231 | | -__setup("softlockup_panic=", softlockup_panic_setup);
232 | 222 |
.. | ..
244 | 234 | }
245 | 235 | __setup("nosoftlockup", nosoftlockup_setup);
246 | 236 |
247 | | -#ifdef CONFIG_SMP
248 | | -int __read_mostly sysctl_softlockup_all_cpu_backtrace;
249 | | -
250 | | -static int __init softlockup_all_cpu_backtrace_setup(char *str)
| 237 | +static int __init watchdog_thresh_setup(char *str)
251 | 238 | {
252 | | - sysctl_softlockup_all_cpu_backtrace = !!simple_strtol(str, NULL, 0);
| 239 | + get_option(&str, &watchdog_thresh);
253 | 240 | return 1;
254 | 241 | }
255 | | -__setup("softlockup_all_cpu_backtrace=", softlockup_all_cpu_backtrace_setup);
256 | | -#endif
| 242 | +__setup("watchdog_thresh=", watchdog_thresh_setup);
257 | 243 |
258 | 244 | static void __lockup_detector_cleanup(void);
259 | 245 |
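Review note: this hunk folds the remaining ad-hoc `simple_strtol()` boot-parameter parsers into a single `get_option()`-based handler for `watchdog_thresh=`. For reference, the same `__setup()` pattern in isolation (the `example_thresh` name is hypothetical):

```c
#include <linux/init.h>
#include <linux/kernel.h>

static int example_thresh __read_mostly = 10;

/* Handles a hypothetical "example_thresh=<n>" boot parameter.
 * get_option() parses one integer from the string; returning 1
 * tells the early-param code the option was consumed. */
static int __init example_thresh_setup(char *str)
{
	get_option(&str, &example_thresh);
	return 1;
}
__setup("example_thresh=", example_thresh_setup);
```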
.. | ..
293 | 279 | }
294 | 280 |
295 | 281 | /* Commands for resetting the watchdog */
296 | | -static void __touch_watchdog(void)
| 282 | +static void update_touch_ts(void)
297 | 283 | {
298 | 284 | __this_cpu_write(watchdog_touch_ts, get_timestamp());
299 | 285 | }
.. | ..
426 | 412 | if (per_cpu(hard_watchdog_warn, next_cpu) == true)
427 | 413 | return;
428 | 414 |
429 | | - atomic_notifier_call_chain(&hardlock_notifier_list, 0, NULL);
430 | | -
431 | 415 | if (hardlockup_panic)
432 | 416 | panic("Watchdog detected hard LOCKUP on cpu %u", next_cpu);
433 | 417 | else
434 | 418 | WARN(1, "Watchdog detected hard LOCKUP on cpu %u", next_cpu);
435 | 419 |
| 420 | + atomic_notifier_call_chain(&hardlock_notifier_list, 0, NULL);
436 | 421 | per_cpu(hard_watchdog_warn, next_cpu) = true;
437 | 422 | } else {
438 | 423 | per_cpu(hard_watchdog_warn, next_cpu) = false;
.. | ..
460 | 445 | */
461 | 446 | static int softlockup_fn(void *data)
462 | 447 | {
463 | | - __this_cpu_write(soft_lockup_hrtimer_cnt,
464 | | - __this_cpu_read(hrtimer_interrupts));
465 | | - __touch_watchdog();
| 448 | + update_touch_ts();
466 | 449 | complete(this_cpu_ptr(&softlockup_completion));
467 | 450 |
468 | 451 | return 0;
.. | ..
509 | 492 |
510 | 493 | /* Clear the guest paused flag on watchdog reset */
511 | 494 | kvm_check_and_clear_guest_paused();
512 | | - __touch_watchdog();
| 495 | + update_touch_ts();
513 | 496 | return HRTIMER_RESTART;
514 | 497 | }
515 | 498 |
.. | ..
529 | 512 | if (kvm_check_and_clear_guest_paused())
530 | 513 | return HRTIMER_RESTART;
531 | 514 |
532 | | - /* only warn once */
533 | | - if (__this_cpu_read(soft_watchdog_warn) == true) {
534 | | - /*
535 | | - * When multiple processes are causing softlockups the
536 | | - * softlockup detector only warns on the first one
537 | | - * because the code relies on a full quiet cycle to
538 | | - * re-arm. The second process prevents the quiet cycle
539 | | - * and never gets reported. Use task pointers to detect
540 | | - * this.
541 | | - */
542 | | - if (__this_cpu_read(softlockup_task_ptr_saved) !=
543 | | - current) {
544 | | - __this_cpu_write(soft_watchdog_warn, false);
545 | | - __touch_watchdog();
546 | | - }
547 | | - return HRTIMER_RESTART;
| 515 | + /*
| 516 | + * Prevent multiple soft-lockup reports if one cpu is already
| 517 | + * engaged in dumping all cpu back traces.
| 518 | + */
| 519 | + if (softlockup_all_cpu_backtrace) {
| 520 | + if (test_and_set_bit_lock(0, &soft_lockup_nmi_warn))
| 521 | + return HRTIMER_RESTART;
548 | 522 | }
549 | 523 |
550 | | - if (softlockup_all_cpu_backtrace) {
551 | | - /* Prevent multiple soft-lockup reports if one cpu is already
552 | | - * engaged in dumping cpu back traces
553 | | - */
554 | | - if (test_and_set_bit(0, &soft_lockup_nmi_warn)) {
555 | | - /* Someone else will report us. Let's give up */
556 | | - __this_cpu_write(soft_watchdog_warn, true);
557 | | - return HRTIMER_RESTART;
558 | | - }
559 | | - }
| 524 | + /* Start period for the next softlockup warning. */
| 525 | + update_touch_ts();
560 | 526 |
561 | 527 | pr_emerg("BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
562 | 528 | smp_processor_id(), duration,
563 | 529 | current->comm, task_pid_nr(current));
564 | | - __this_cpu_write(softlockup_task_ptr_saved, current);
565 | 530 | print_modules();
566 | 531 | print_irqtrace_events(current);
567 | 532 | if (regs)
.. | ..
570 | 535 | dump_stack();
571 | 536 |
572 | 537 | if (softlockup_all_cpu_backtrace) {
573 | | - /* Avoid generating two back traces for current
574 | | - * given that one is already made above
575 | | - */
576 | 538 | trigger_allbutself_cpu_backtrace();
577 | | -
578 | | - clear_bit(0, &soft_lockup_nmi_warn);
579 | | - /* Barrier to sync with other cpus */
580 | | - smp_mb__after_atomic();
| 539 | + clear_bit_unlock(0, &soft_lockup_nmi_warn);
581 | 540 | }
582 | 541 |
| 542 | + trace_android_vh_watchdog_timer_softlockup(duration, regs, !!softlockup_panic);
583 | 543 | add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
584 | 544 | if (softlockup_panic)
585 | 545 | panic("softlockup: hung tasks");
586 | | - __this_cpu_write(soft_watchdog_warn, true);
587 | | - } else
588 | | - __this_cpu_write(soft_watchdog_warn, false);
| 546 | + }
589 | 547 |
590 | 548 | return HRTIMER_RESTART;
591 | 549 | }
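Review note: besides dropping the per-CPU `soft_watchdog_warn`/`softlockup_task_ptr_saved` bookkeeping (re-arming is now done by `update_touch_ts()` at the start of each report), this hunk replaces `test_and_set_bit()` plus `clear_bit()`/`smp_mb__after_atomic()` with the lock-variant bitops, which provide the acquire/release ordering themselves. The pattern in isolation (names are illustrative):

```c
#include <linux/bitops.h>

/* Bit 0 of report_lock serializes the all-CPU backtrace dump. */
static unsigned long report_lock;

static bool try_begin_report(void)
{
	/* test_and_set_bit_lock() has acquire semantics. */
	return !test_and_set_bit_lock(0, &report_lock);
}

static void end_report(void)
{
	/* clear_bit_unlock() has release semantics, so the explicit
	 * smp_mb__after_atomic() from the old code is not needed. */
	clear_bit_unlock(0, &report_lock);
}
```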
.. | ..
604 | 562 | * Start the timer first to prevent the NMI watchdog triggering
605 | 563 | * before the timer has a chance to fire.
606 | 564 | */
607 | | - hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
| 565 | + hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
608 | 566 | hrtimer->function = watchdog_timer_fn;
609 | 567 | hrtimer_start(hrtimer, ns_to_ktime(sample_period),
610 | | - HRTIMER_MODE_REL_PINNED);
| 568 | + HRTIMER_MODE_REL_PINNED_HARD);
611 | 569 |
612 | 570 | /* Initialize timestamp */
613 | | - __touch_watchdog();
| 571 | + update_touch_ts();
614 | 572 | /* Enable the perf event */
615 | 573 | if (watchdog_enabled & NMI_WATCHDOG_ENABLED)
616 | 574 | watchdog_nmi_enable(cpu);
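Review note: `HRTIMER_MODE_REL_HARD`/`HRTIMER_MODE_REL_PINNED_HARD` make the watchdog timer expire in hard interrupt context. On PREEMPT_RT, ordinary hrtimers are expired from a softirq thread, which a soft lockup could starve, defeating the detector. A minimal sketch of the same arming sequence (names hypothetical):

```c
#include <linux/hrtimer.h>
#include <linux/ktime.h>

static struct hrtimer example_timer;

/* Runs in hard-irq context even on PREEMPT_RT. */
static enum hrtimer_restart example_timer_fn(struct hrtimer *t)
{
	hrtimer_forward_now(t, ms_to_ktime(1000));
	return HRTIMER_RESTART;
}

static void example_timer_start(void)
{
	hrtimer_init(&example_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
	example_timer.function = example_timer_fn;
	/* _PINNED keeps expiry on the CPU being watched. */
	hrtimer_start(&example_timer, ms_to_ktime(1000),
		      HRTIMER_MODE_REL_PINNED_HARD);
}
```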
.. | ..
682 | 640 | return 0;
683 | 641 | }
684 | 642 |
685 | | -static void lockup_detector_reconfigure(void)
| 643 | +static void __lockup_detector_reconfigure(void)
686 | 644 | {
687 | 645 | cpus_read_lock();
688 | 646 | watchdog_nmi_stop();
.. | ..
702 | 660 | __lockup_detector_cleanup();
703 | 661 | }
704 | 662 |
| 663 | +void lockup_detector_reconfigure(void)
| 664 | +{
| 665 | + mutex_lock(&watchdog_mutex);
| 666 | + __lockup_detector_reconfigure();
| 667 | + mutex_unlock(&watchdog_mutex);
| 668 | +}
| 669 | +
705 | 670 | /*
706 | 671 | * Create the watchdog thread infrastructure and configure the detector(s).
707 | 672 | *
708 | 673 | * The threads are not unparked as watchdog_allowed_mask is empty. When
709 | | - * the threads are sucessfully initialized, take the proper locks and
| 674 | + * the threads are successfully initialized, take the proper locks and
710 | 675 | * unpark the threads in the watchdog_cpumask if the watchdog is enabled.
711 | 676 | */
712 | 677 | static __init void lockup_detector_setup(void)
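Review note: this introduces the usual locked-wrapper convention: `__lockup_detector_reconfigure()` assumes `watchdog_mutex` is held, while the new exported `lockup_detector_reconfigure()` takes it on behalf of external callers. The shape of the pattern (names hypothetical):

```c
#include <linux/mutex.h>

static DEFINE_MUTEX(example_mutex);

/* Caller must hold example_mutex. */
static void __example_reconfigure(void)
{
	/* ... reconfiguration work ... */
}

/* Public entry point; safe to call without the lock held. */
void example_reconfigure(void)
{
	mutex_lock(&example_mutex);
	__example_reconfigure();
	mutex_unlock(&example_mutex);
}
```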
.. | ..
722 | 687 | return;
723 | 688 |
724 | 689 | mutex_lock(&watchdog_mutex);
725 | | - lockup_detector_reconfigure();
| 690 | + __lockup_detector_reconfigure();
726 | 691 | softlockup_initialized = true;
727 | 692 | mutex_unlock(&watchdog_mutex);
728 | 693 | }
729 | 694 |
730 | 695 | #else /* CONFIG_SOFTLOCKUP_DETECTOR */
731 | | -static void lockup_detector_reconfigure(void)
| 696 | +static void __lockup_detector_reconfigure(void)
732 | 697 | {
733 | 698 | cpus_read_lock();
734 | 699 | watchdog_nmi_stop();
.. | ..
736 | 701 | watchdog_nmi_start();
737 | 702 | cpus_read_unlock();
738 | 703 | }
| 704 | +void lockup_detector_reconfigure(void)
| 705 | +{
| 706 | + __lockup_detector_reconfigure();
| 707 | +}
739 | 708 | static inline void lockup_detector_setup(void)
740 | 709 | {
741 | | - lockup_detector_reconfigure();
| 710 | + __lockup_detector_reconfigure();
742 | 711 | }
743 | 712 | #endif /* !CONFIG_SOFTLOCKUP_DETECTOR */
744 | 713 |
.. | ..
778 | 747 | {
779 | 748 | /* Remove impossible cpus to keep sysctl output clean. */
780 | 749 | cpumask_and(&watchdog_cpumask, &watchdog_cpumask, cpu_possible_mask);
781 | | - lockup_detector_reconfigure();
| 750 | + __lockup_detector_reconfigure();
782 | 751 | }
783 | 752 |
784 | 753 | /*
.. | ..
794 | 763 | * proc_soft_watchdog | soft_watchdog_user_enabled | SOFT_WATCHDOG_ENABLED
795 | 764 | */
796 | 765 | static int proc_watchdog_common(int which, struct ctl_table *table, int write,
797 | | - void __user *buffer, size_t *lenp, loff_t *ppos)
| 766 | + void *buffer, size_t *lenp, loff_t *ppos)
798 | 767 | {
799 | 768 | int err, old, *param = table->data;
800 | 769 |
.. | ..
821 | 790 | * /proc/sys/kernel/watchdog
822 | 791 | */
823 | 792 | int proc_watchdog(struct ctl_table *table, int write,
824 | | - void __user *buffer, size_t *lenp, loff_t *ppos)
| 793 | + void *buffer, size_t *lenp, loff_t *ppos)
825 | 794 | {
826 | 795 | return proc_watchdog_common(NMI_WATCHDOG_ENABLED|SOFT_WATCHDOG_ENABLED,
827 | 796 | table, write, buffer, lenp, ppos);
.. | ..
831 | 800 | * /proc/sys/kernel/nmi_watchdog
832 | 801 | */
833 | 802 | int proc_nmi_watchdog(struct ctl_table *table, int write,
834 | | - void __user *buffer, size_t *lenp, loff_t *ppos)
| 803 | + void *buffer, size_t *lenp, loff_t *ppos)
835 | 804 | {
836 | 805 | if (!nmi_watchdog_available && write)
837 | 806 | return -ENOTSUPP;
.. | ..
843 | 812 | * /proc/sys/kernel/soft_watchdog
844 | 813 | */
845 | 814 | int proc_soft_watchdog(struct ctl_table *table, int write,
846 | | - void __user *buffer, size_t *lenp, loff_t *ppos)
| 815 | + void *buffer, size_t *lenp, loff_t *ppos)
847 | 816 | {
848 | 817 | return proc_watchdog_common(SOFT_WATCHDOG_ENABLED,
849 | 818 | table, write, buffer, lenp, ppos);
.. | ..
853 | 822 | * /proc/sys/kernel/watchdog_thresh
854 | 823 | */
855 | 824 | int proc_watchdog_thresh(struct ctl_table *table, int write,
856 | | - void __user *buffer, size_t *lenp, loff_t *ppos)
| 825 | + void *buffer, size_t *lenp, loff_t *ppos)
857 | 826 | {
858 | 827 | int err, old;
859 | 828 |
.. | ..
876 | 845 | * been brought online, if desired.
877 | 846 | */
878 | 847 | int proc_watchdog_cpumask(struct ctl_table *table, int write,
879 | | - void __user *buffer, size_t *lenp, loff_t *ppos)
| 848 | + void *buffer, size_t *lenp, loff_t *ppos)
880 | 849 | {
881 | 850 | int err;
882 | 851 |
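Review note: dropping `__user` from all the proc handler signatures tracks the sysctl core change that hands `->proc_handler` implementations a kernel buffer, so handlers no longer deal with userspace pointers directly. A handler written against the new signature (hypothetical name; `proc_dointvec_minmax()` is the stock helper):

```c
#include <linux/sysctl.h>

/* With the kernel-buffer convention the handler can pass the buffer
 * straight to the generic proc_do* helpers. */
int proc_example(struct ctl_table *table, int write,
		 void *buffer, size_t *lenp, loff_t *ppos)
{
	return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
}
```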