```diff
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /* CPU virtualization extensions handling
  *
  * This should carry the code for handling CPU virtualization extensions
@@ -8,9 +9,6 @@
  * Copyright (C) 2008, Red Hat Inc.
  *
  * Contains code from KVM, Copyright (C) 2006 Qumranet, Inc.
- *
- * This work is licensed under the terms of the GNU GPL, version 2. See
- * the COPYING file in the top-level directory.
  */
 #ifndef _ASM_X86_VIRTEX_H
 #define _ASM_X86_VIRTEX_H
@@ -32,15 +30,22 @@
 }
 
 
-/** Disable VMX on the current CPU
+/**
+ * cpu_vmxoff() - Disable VMX on the current CPU
  *
- * vmxoff causes a undefined-opcode exception if vmxon was not run
- * on the CPU previously. Only call this function if you know VMX
- * is enabled.
+ * Disable VMX and clear CR4.VMXE (even if VMXOFF faults)
+ *
+ * Note, VMXOFF causes a #UD if the CPU is !post-VMXON, but it's impossible to
+ * atomically track post-VMXON state, e.g. this may be called in NMI context.
+ * Eat all faults as all other faults on VMXOFF faults are mode related, i.e.
+ * faults are guaranteed to be due to the !post-VMXON check unless the CPU is
+ * magically in RM, VM86, compat mode, or at CPL>0.
  */
 static inline void cpu_vmxoff(void)
 {
-        asm volatile (ASM_VMX_VMXOFF : : : "cc");
+        asm_volatile_goto("1: vmxoff\n\t"
+                          _ASM_EXTABLE(1b, %l[fault]) :::: fault);
+fault:
         cr4_clear_bits(X86_CR4_VMXE);
 }
 
```
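The rewritten cpu_vmxoff() relies on the kernel's `asm goto` support: the `1:` label marks the VMXOFF instruction, `_ASM_EXTABLE(1b, %l[fault])` installs an exception-table entry for it, and a #UD raised on a CPU that is not post-VMXON makes the fixup resume execution at the C label `fault`, so CR4.VMXE is cleared on both paths. Below is a minimal user-space sketch of just that `asm goto` control transfer; it is not kernel code, the function name `vmxoff_fault_model` is invented for illustration, a plain `jmp` stands in for the exception-table fixup (which cannot be reproduced outside the kernel), and it assumes a GCC or Clang toolchain with `asm goto` support.

```c
#include <stdio.h>

/*
 * User-space illustration of inline assembly that can branch to a C label.
 * In the kernel, the branch to "fault" is taken by the #UD fixup installed
 * with _ASM_EXTABLE(); here an unconditional jmp plays that role.
 */
static int vmxoff_fault_model(void)
{
        __asm__ goto("jmp %l[fault]" : : : : fault);
        return 0;       /* reached only if the "instruction" does not fault */
fault:
        return 1;       /* the label the exception fixup would land on */
}

int main(void)
{
        printf("took the fault path: %d\n", vmxoff_fault_model());
        return 0;
}
```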
```diff
@@ -83,15 +88,10 @@
  */
 static inline int cpu_has_svm(const char **msg)
 {
-        if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) {
+        if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
+            boot_cpu_data.x86_vendor != X86_VENDOR_HYGON) {
                 if (msg)
-                        *msg = "not amd";
-                return 0;
-        }
-
-        if (boot_cpu_data.extended_cpuid_level < SVM_CPUID_FUNC) {
-                if (msg)
-                        *msg = "can't execute cpuid_8000000a";
+                        *msg = "not amd or hygon";
                 return 0;
         }
 
```
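The vendor check in cpu_has_svm() now accepts Hygon as well as AMD (Hygon's Dhyana cores are derived from AMD Zen and implement SVM), and the old extended-CPUID-level check is dropped in the same hunk. For reference, the same information is visible from user space via CPUID: leaf 0 returns the vendor string, and leaf 0x80000001 advertises SVM in ECX bit 2. The stand-alone check below mirrors that; it is an illustrative sketch, not kernel code, and assumes an x86 target compiled with GCC or Clang (for <cpuid.h>).

```c
#include <cpuid.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        unsigned int eax, ebx, ecx, edx;
        char vendor[13] = { 0 };

        /* CPUID leaf 0: the vendor string lives in EBX, EDX, ECX, in that order. */
        if (!__get_cpuid(0, &eax, &ebx, &ecx, &edx))
                return 1;
        memcpy(vendor, &ebx, 4);
        memcpy(vendor + 4, &edx, 4);
        memcpy(vendor + 8, &ecx, 4);

        if (strcmp(vendor, "AuthenticAMD") && strcmp(vendor, "HygonGenuine")) {
                printf("%s: not amd or hygon\n", vendor);
                return 1;
        }

        /* CPUID leaf 0x80000001: ECX bit 2 advertises SVM support. */
        if (!__get_cpuid(0x80000001, &eax, &ebx, &ecx, &edx) ||
            !(ecx & (1u << 2))) {
                printf("SVM not advertised by CPUID\n");
                return 1;
        }

        printf("SVM advertised by %s\n", vendor);
        return 0;
}
```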
```diff
@@ -114,7 +114,21 @@
 
         wrmsrl(MSR_VM_HSAVE_PA, 0);
         rdmsrl(MSR_EFER, efer);
-        wrmsrl(MSR_EFER, efer & ~EFER_SVME);
+        if (efer & EFER_SVME) {
+                /*
+                 * Force GIF=1 prior to disabling SVM to ensure INIT and NMI
+                 * aren't blocked, e.g. if a fatal error occurred between CLGI
+                 * and STGI. Note, STGI may #UD if SVM is disabled from NMI
+                 * context between reading EFER and executing STGI. In that
+                 * case, GIF must already be set, otherwise the NMI would have
+                 * been blocked, so just eat the fault.
+                 */
+                asm_volatile_goto("1: stgi\n\t"
+                                  _ASM_EXTABLE(1b, %l[fault])
+                                  ::: "memory" : fault);
+fault:
+                wrmsrl(MSR_EFER, efer & ~EFER_SVME);
+        }
 }
 
 /** Makes sure SVM is disabled, if it is supported on the CPU
```
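The ordering in cpu_svm_disable() is the point of the change: STGI is only legal while EFER.SVME is set, so GIF has to be forced to 1 before SVM is turned off; if GIF were left clear (say a fatal error hit between CLGI and STGI), INIT and NMI would remain blocked after SVM is disabled, which an emergency shutdown or kexec path cannot afford. The toy model below only demonstrates that ordering; the MSR and STGI helpers are invented user-space stubs (the real code uses rdmsrl()/wrmsrl() on MSR_EFER and an STGI covered by an exception-table fixup), and nothing here touches actual hardware state.

```c
#include <stdint.h>
#include <stdio.h>

#define EFER_SVME (1ULL << 12)  /* SVM-enable bit in the EFER MSR */

/* Invented stand-ins for rdmsrl()/wrmsrl() and STGI, so the ordering can be
 * exercised outside the kernel. */
static uint64_t fake_efer = EFER_SVME;

static uint64_t read_efer(void)      { return fake_efer; }
static void write_efer(uint64_t val) { fake_efer = val; }
static void stgi(void)               { puts("STGI: GIF forced to 1"); }

static void svm_disable_model(void)
{
        uint64_t efer = read_efer();

        if (efer & EFER_SVME) {
                /* Unblock INIT/NMI delivery first, while STGI is still legal... */
                stgi();
                /* ...and only then clear EFER.SVME. */
                write_efer(efer & ~EFER_SVME);
        }
}

int main(void)
{
        svm_disable_model();
        printf("EFER.SVME is now %d\n", (int)!!(fake_efer & EFER_SVME));
        return 0;
}
```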
|---|