| .. | .. |
|---|
| 528 | 528 | } |
|---|
| 529 | 529 | } |
|---|
| 530 | 530 | |
|---|
| 531 | | -static void vmxoff_nmi(int cpu, struct pt_regs *regs) |
|---|
| 532 | | -{ |
|---|
| 533 | | - cpu_emergency_vmxoff(); |
|---|
| 534 | | -} |
|---|
| 531 | +static inline void nmi_shootdown_cpus_on_restart(void); |
|---|
| 535 | 532 | |
|---|
| 536 | | -/* Use NMIs as IPIs to tell all CPUs to disable virtualization */ |
|---|
| 537 | | -static void emergency_vmx_disable_all(void) |
|---|
| 533 | +static void emergency_reboot_disable_virtualization(void) |
|---|
| 538 | 534 | { |
|---|
| 539 | 535 | /* Just make sure we won't change CPUs while doing this */ |
|---|
| 540 | 536 | local_irq_disable(); |
|---|
| 541 | 537 | |
|---|
| 542 | 538 | /* |
|---|
| 543 | | - * Disable VMX on all CPUs before rebooting, otherwise we risk hanging |
|---|
| 544 | | - * the machine, because the CPU blocks INIT when it's in VMX root. |
|---|
| 539 | + * Disable virtualization on all CPUs before rebooting to avoid hanging |
|---|
| 540 | + * the system, as VMX and SVM block INIT when running in the host. |
|---|
| 545 | 541 | * |
|---|
| 546 | 542 | * We can't take any locks and we may be in an inconsistent state, so |
|---|
| 547 | | - * use NMIs as IPIs to tell the other CPUs to exit VMX root and halt. |
|---|
| 543 | + * use NMIs as IPIs to tell the other CPUs to disable VMX/SVM and halt. |
|---|
| 548 | 544 | * |
|---|
| 549 | | - * Do the NMI shootdown even if VMX if off on _this_ CPU, as that |
|---|
| 550 | | - * doesn't prevent a different CPU from being in VMX root operation. |
|---|
| 545 | + * Do the NMI shootdown even if virtualization is off on _this_ CPU, as |
|---|
| 546 | + * other CPUs may have virtualization enabled. |
|---|
| 551 | 547 | */ |
|---|
| 552 | | - if (cpu_has_vmx()) { |
|---|
| 553 | | - /* Safely force _this_ CPU out of VMX root operation. */ |
|---|
| 554 | | - __cpu_emergency_vmxoff(); |
|---|
| 548 | + if (cpu_has_vmx() || cpu_has_svm(NULL)) { |
|---|
| 549 | + /* Safely force _this_ CPU out of VMX/SVM operation. */ |
|---|
| 550 | + cpu_emergency_disable_virtualization(); |
|---|
| 555 | 551 | |
|---|
| 556 | | - /* Halt and exit VMX root operation on the other CPUs. */ |
|---|
| 557 | | - nmi_shootdown_cpus(vmxoff_nmi); |
|---|
| 552 | + /* Disable VMX/SVM and halt on other CPUs. */ |
|---|
| 553 | + nmi_shootdown_cpus_on_restart(); |
|---|
| 558 | 554 | } |
|---|
| 559 | 555 | } |
|---|
| 560 | 556 | |
|---|
| .. | .. |
|---|
| 590 | 586 | unsigned short mode; |
|---|
| 591 | 587 | |
|---|
| 592 | 588 | if (reboot_emergency) |
|---|
| 593 | | - emergency_vmx_disable_all(); |
|---|
| 589 | + emergency_reboot_disable_virtualization(); |
|---|
| 594 | 590 | |
|---|
| 595 | 591 | tboot_shutdown(TB_SHUTDOWN_REBOOT); |
|---|
| 596 | 592 | |
|---|
| .. | .. |
|---|
| 795 | 791 | /* This is the CPU performing the emergency shutdown work. */ |
|---|
| 796 | 792 | int crashing_cpu = -1; |
|---|
| 797 | 793 | |
|---|
| 794 | +/* |
|---|
| 795 | + * Disable virtualization, i.e. VMX or SVM, to ensure INIT is recognized during |
|---|
| 796 | + * reboot. VMX blocks INIT if the CPU is post-VMXON, and SVM blocks INIT if |
|---|
| 797 | + * GIF=0, i.e. if the crash occurred between CLGI and STGI. |
|---|
| 798 | + */ |
|---|
| 799 | +void cpu_emergency_disable_virtualization(void) |
|---|
| 800 | +{ |
|---|
| 801 | + cpu_emergency_vmxoff(); |
|---|
| 802 | + cpu_emergency_svm_disable(); |
|---|
| 803 | +} |
|---|
| 804 | + |
|---|
| 798 | 805 | #if defined(CONFIG_SMP) |
|---|
| 799 | 806 | |
|---|
| 800 | 807 | static nmi_shootdown_cb shootdown_callback; |
|---|
| .. | .. |
|---|
| 817 | 824 | return NMI_HANDLED; |
|---|
| 818 | 825 | local_irq_disable(); |
|---|
| 819 | 826 | |
|---|
| 820 | | - shootdown_callback(cpu, regs); |
|---|
| 827 | + if (shootdown_callback) |
|---|
| 828 | + shootdown_callback(cpu, regs); |
|---|
| 829 | + |
|---|
| 830 | + /* |
|---|
| 831 | + * Prepare the CPU for reboot _after_ invoking the callback so that the |
|---|
| 832 | + * callback can safely use virtualization instructions, e.g. VMCLEAR. |
|---|
| 833 | + */ |
|---|
| 834 | + cpu_emergency_disable_virtualization(); |
|---|
| 821 | 835 | |
|---|
| 822 | 836 | atomic_dec(&waiting_for_crash_ipi); |
|---|
| 823 | 837 | /* Assume hlt works */ |
|---|
| .. | .. |
|---|
| 828 | 842 | return NMI_HANDLED; |
|---|
| 829 | 843 | } |
|---|
| 830 | 844 | |
|---|
| 831 | | -/* |
|---|
| 832 | | - * Halt all other CPUs, calling the specified function on each of them |
|---|
| 845 | +/** |
|---|
| 846 | + * nmi_shootdown_cpus - Stop other CPUs via NMI |
|---|
| 847 | + * @callback: Optional callback to be invoked from the NMI handler |
|---|
| 833 | 848 | * |
|---|
| 834 | | - * This function can be used to halt all other CPUs on crash |
|---|
| 835 | | - * or emergency reboot time. The function passed as parameter |
|---|
| 836 | | - * will be called inside a NMI handler on all CPUs. |
|---|
| 849 | + * The NMI handler on the remote CPUs invokes @callback, if not |
|---|
| 850 | + * NULL, first and then disables virtualization to ensure that |
|---|
| 851 | + * INIT is recognized during reboot. |
|---|
| 852 | + * |
|---|
| 853 | + * nmi_shootdown_cpus() can only be invoked once. After the first |
|---|
| 854 | + * invocation all other CPUs are stuck in crash_nmi_callback() and |
|---|
| 855 | + * cannot respond to a second NMI. |
|---|
| 837 | 856 | */ |
|---|
| 838 | 857 | void nmi_shootdown_cpus(nmi_shootdown_cb callback) |
|---|
| 839 | 858 | { |
|---|
| 840 | 859 | unsigned long msecs; |
|---|
| 860 | + |
|---|
| 841 | 861 | local_irq_disable(); |
|---|
| 862 | + |
|---|
| 863 | + /* |
|---|
| 864 | + * Avoid certain doom if a shootdown already occurred; re-registering |
|---|
| 865 | + * the NMI handler will cause list corruption, modifying the callback |
|---|
| 866 | + * will do who knows what, etc... |
|---|
| 867 | + */ |
|---|
| 868 | + if (WARN_ON_ONCE(crash_ipi_issued)) |
|---|
| 869 | + return; |
|---|
| 842 | 870 | |
|---|
| 843 | 871 | /* Make a note of crashing cpu. Will be used in NMI callback. */ |
|---|
| 844 | 872 | crashing_cpu = safe_smp_processor_id(); |
|---|
| .. | .. |
|---|
| 867 | 895 | msecs--; |
|---|
| 868 | 896 | } |
|---|
| 869 | 897 | |
|---|
| 870 | | - /* Leave the nmi callback set */ |
|---|
| 898 | + /* |
|---|
| 899 | + * Leave the nmi callback set, shootdown is a one-time thing. Clearing |
|---|
| 900 | + * the callback could result in a NULL pointer dereference if a CPU |
|---|
| 901 | + * (finally) responds after the timeout expires. |
|---|
| 902 | + */ |
|---|
| 903 | +} |
|---|
| 904 | + |
|---|
| 905 | +static inline void nmi_shootdown_cpus_on_restart(void) |
|---|
| 906 | +{ |
|---|
| 907 | + if (!crash_ipi_issued) |
|---|
| 908 | + nmi_shootdown_cpus(NULL); |
|---|
| 871 | 909 | } |
|---|
| 872 | 910 | |
|---|
| 873 | 911 | /* |
|---|
| .. | .. |
|---|
| 897 | 935 | /* No other CPUs to shoot down */ |
|---|
| 898 | 936 | } |
|---|
| 899 | 937 | |
|---|
| 938 | +static inline void nmi_shootdown_cpus_on_restart(void) { } |
|---|
| 939 | + |
|---|
| 900 | 940 | void run_crash_ipi_callback(struct pt_regs *regs) |
|---|
| 901 | 941 | { |
|---|
| 902 | 942 | } |
|---|