2024-10-12 a5969cabbb4660eab42b6ef0412cbbd1200cf14d
kernel/arch/x86/include/asm/virtext.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /* CPU virtualization extensions handling
  *
  * This should carry the code for handling CPU virtualization extensions
@@ -8,9 +9,6 @@
  * Copyright (C) 2008, Red Hat Inc.
  *
  * Contains code from KVM, Copyright (C) 2006 Qumranet, Inc.
- *
- * This work is licensed under the terms of the GNU GPL, version 2. See
- * the COPYING file in the top-level directory.
  */
 #ifndef _ASM_X86_VIRTEX_H
 #define _ASM_X86_VIRTEX_H
@@ -32,15 +30,22 @@
 }
 
 
-/** Disable VMX on the current CPU
+/**
+ * cpu_vmxoff() - Disable VMX on the current CPU
  *
- * vmxoff causes a undefined-opcode exception if vmxon was not run
- * on the CPU previously. Only call this function if you know VMX
- * is enabled.
+ * Disable VMX and clear CR4.VMXE (even if VMXOFF faults)
+ *
+ * Note, VMXOFF causes a #UD if the CPU is !post-VMXON, but it's impossible to
+ * atomically track post-VMXON state, e.g. this may be called in NMI context.
+ * Eat all faults, as all other faults on VMXOFF are mode related, i.e.
+ * faults are guaranteed to be due to the !post-VMXON check unless the CPU is
+ * magically in RM, VM86, compat mode, or at CPL>0.
  */
 static inline void cpu_vmxoff(void)
 {
-	asm volatile (ASM_VMX_VMXOFF : : : "cc");
+	asm_volatile_goto("1: vmxoff\n\t"
+			  _ASM_EXTABLE(1b, %l[fault]) :::: fault);
+fault:
 	cr4_clear_bits(X86_CR4_VMXE);
 }
 
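Note: the reworked cpu_vmxoff() is intended to be callable from emergency paths (e.g. NMI-driven reboot) where it is unknown whether the CPU is post-VMXON. A minimal caller sketch, assuming the cpu_vmx_enabled() helper already present in this header (it simply tests CR4.VMXE); the function name below is illustrative and not part of the patch:

/* Sketch only: disable VMX if CR4.VMXE indicates it may be on. */
static inline void emergency_vmxoff_sketch(void)
{
	/*
	 * CR4.VMXE can be set while the CPU is not (yet) post-VMXON, so this
	 * check alone cannot prove VMXOFF will succeed; cpu_vmxoff() eats the
	 * possible #UD via the exception table instead of relying on callers
	 * to track the exact post-VMXON state.
	 */
	if (cpu_vmx_enabled())
		cpu_vmxoff();
}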
@@ -83,15 +88,10 @@
  */
 static inline int cpu_has_svm(const char **msg)
 {
-	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) {
+	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
+	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON) {
 		if (msg)
-			*msg = "not amd";
-		return 0;
-	}
-
-	if (boot_cpu_data.extended_cpuid_level < SVM_CPUID_FUNC) {
-		if (msg)
-			*msg = "can't execute cpuid_8000000a";
+			*msg = "not amd or hygon";
 		return 0;
 	}
 
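Note: with the vendor check widened to accept Hygon, callers continue to treat the msg out-parameter as optional. A hypothetical caller sketch (function name illustrative, not from the patch):

/* Sketch only: probe SVM support and report why it is unavailable. */
static int svm_probe_sketch(void)
{
	const char *msg;

	if (!cpu_has_svm(&msg)) {
		pr_info("SVM unavailable: %s\n", msg);
		return -ENODEV;
	}
	return 0;
}

Passing NULL instead of &msg is also valid when the reason string is not needed.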
@@ -114,7 +114,21 @@
 
 	wrmsrl(MSR_VM_HSAVE_PA, 0);
 	rdmsrl(MSR_EFER, efer);
-	wrmsrl(MSR_EFER, efer & ~EFER_SVME);
+	if (efer & EFER_SVME) {
+		/*
+		 * Force GIF=1 prior to disabling SVM to ensure INIT and NMI
+		 * aren't blocked, e.g. if a fatal error occurred between CLGI
+		 * and STGI.  Note, STGI may #UD if SVM is disabled from NMI
+		 * context between reading EFER and executing STGI.  In that
+		 * case, GIF must already be set, otherwise the NMI would have
+		 * been blocked, so just eat the fault.
+		 */
+		asm_volatile_goto("1: stgi\n\t"
+				  _ASM_EXTABLE(1b, %l[fault])
+				  ::: "memory" : fault);
+fault:
+		wrmsrl(MSR_EFER, efer & ~EFER_SVME);
+	}
 }
 
 /** Makes sure SVM is disabled, if it is supported on the CPU
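Note: the function whose kerneldoc begins on the last context line above is the wrapper used by crash/reboot paths. A sketch of its expected shape, following the existing cpu_emergency_svm_disable() pattern in this header (treat the exact body as an assumption, it is not part of this diff):

/* Sketch only: disable SVM on this CPU if the vendor/feature check passes. */
static inline void cpu_emergency_svm_disable(void)
{
	if (cpu_has_svm(NULL))
		cpu_svm_disable();
}

Because cpu_svm_disable() now forces GIF=1 before clearing EFER.SVME, an INIT or NMI delivered during the emergency path is no longer blocked by a stale CLGI.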