2024-12-19 9370bb92b2d16684ee45cf24e879c93c509162da
kernel/arch/x86/kvm/cpuid.h
@@ -5,9 +5,13 @@
 #include "x86.h"
 #include <asm/cpu.h>
 #include <asm/processor.h>
+#include <uapi/asm/kvm_para.h>
 
-int kvm_update_cpuid(struct kvm_vcpu *vcpu);
-bool kvm_mpx_supported(void);
+extern u32 kvm_cpu_caps[NCAPINTS] __read_mostly;
+void kvm_set_cpu_caps(void);
+
+void kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu);
+void kvm_update_pv_runtime(struct kvm_vcpu *vcpu);
 struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
 					      u32 function, u32 index);
 int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
@@ -23,13 +27,18 @@
 				     struct kvm_cpuid2 *cpuid,
 				     struct kvm_cpuid_entry2 __user *entries);
 bool kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx,
-	       u32 *ecx, u32 *edx, bool check_limit);
+	       u32 *ecx, u32 *edx, bool exact_only);
 
 int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu);
 
 static inline int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
 {
 	return vcpu->arch.maxphyaddr;
+}
+
+static inline bool kvm_vcpu_is_illegal_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
+{
+	return (gpa >= BIT_ULL(cpuid_maxphyaddr(vcpu)));
 }
 
 struct cpuid_reg {
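
As context for the new kvm_vcpu_is_illegal_gpa() helper above: with a guest MAXPHYADDR of N, any guest physical address with bit N or higher set is out of range. A minimal standalone sketch of that check (not KVM code; the MAXPHYADDR value of 48 is only an assumed example):

/*
 * Standalone illustration of the kvm_vcpu_is_illegal_gpa() check: with a
 * guest MAXPHYADDR of N, any GPA at or above 1 << N is illegal.
 */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

static bool gpa_is_illegal(uint64_t gpa, unsigned int maxphyaddr)
{
	return gpa >= (1ULL << maxphyaddr);
}

int main(void)
{
	unsigned int maxphyaddr = 48;	/* assumed guest MAXPHYADDR */

	printf("%d\n", gpa_is_illegal(0x0000ffffffffffffULL, maxphyaddr)); /* 0: fits in 48 bits */
	printf("%d\n", gpa_is_illegal(0x0001000000000000ULL, maxphyaddr)); /* 1: bit 48 set */
	return 0;
}
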
@@ -53,28 +62,54 @@
 	[CPUID_7_ECX]         = {         7, 0, CPUID_ECX},
 	[CPUID_8000_0007_EBX] = {0x80000007, 0, CPUID_EBX},
 	[CPUID_7_EDX]         = {         7, 0, CPUID_EDX},
+	[CPUID_7_1_EAX]       = {         7, 1, CPUID_EAX},
+	[CPUID_8000_0021_EAX] = {0x80000021, 0, CPUID_EAX},
 };
 
-static __always_inline struct cpuid_reg x86_feature_cpuid(unsigned x86_feature)
+/*
+ * Reverse CPUID and its derivatives can only be used for hardware-defined
+ * feature words, i.e. words whose bits directly correspond to a CPUID leaf.
+ * Retrieving a feature bit or masking guest CPUID from a Linux-defined word
+ * is nonsensical as the bit number/mask is an arbitrary software-defined value
+ * and can't be used by KVM to query/control guest capabilities.  And obviously
+ * the leaf being queried must have an entry in the lookup table.
+ */
+static __always_inline void reverse_cpuid_check(unsigned int x86_leaf)
 {
-	unsigned x86_leaf = x86_feature / 32;
-
+	BUILD_BUG_ON(x86_leaf == CPUID_LNX_1);
+	BUILD_BUG_ON(x86_leaf == CPUID_LNX_2);
+	BUILD_BUG_ON(x86_leaf == CPUID_LNX_3);
+	BUILD_BUG_ON(x86_leaf == CPUID_LNX_4);
 	BUILD_BUG_ON(x86_leaf >= ARRAY_SIZE(reverse_cpuid));
 	BUILD_BUG_ON(reverse_cpuid[x86_leaf].function == 0);
+}
 
+/*
+ * Retrieve the bit mask from an X86_FEATURE_* definition.  Features contain
+ * the hardware defined bit number (stored in bits 4:0) and a software defined
+ * "word" (stored in bits 31:5).  The word is used to index into arrays of
+ * bit masks that hold the per-cpu feature capabilities, e.g. this_cpu_has().
+ */
+static __always_inline u32 __feature_bit(int x86_feature)
+{
+	reverse_cpuid_check(x86_feature / 32);
+	return 1 << (x86_feature & 31);
+}
+
+#define feature_bit(name)  __feature_bit(X86_FEATURE_##name)
+
+static __always_inline struct cpuid_reg x86_feature_cpuid(unsigned int x86_feature)
+{
+	unsigned int x86_leaf = x86_feature / 32;
+
+	reverse_cpuid_check(x86_leaf);
 	return reverse_cpuid[x86_leaf];
 }
 
-static __always_inline int *guest_cpuid_get_register(struct kvm_vcpu *vcpu, unsigned x86_feature)
+static __always_inline u32 *__cpuid_entry_get_reg(struct kvm_cpuid_entry2 *entry,
+						  u32 reg)
 {
-	struct kvm_cpuid_entry2 *entry;
-	const struct cpuid_reg cpuid = x86_feature_cpuid(x86_feature);
-
-	entry = kvm_find_cpuid_entry(vcpu, cpuid.function, cpuid.index);
-	if (!entry)
-		return NULL;
-
-	switch (cpuid.reg) {
+	switch (reg) {
 	case CPUID_EAX:
 		return &entry->eax;
 	case CPUID_EBX:
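
The reverse_cpuid_check()/__feature_bit() comments in the hunk above describe how an X86_FEATURE_* value encodes a software-defined word (feature / 32) and a hardware bit number (feature & 31). A minimal standalone sketch of that decomposition (X86_FEATURE_AVX2, word 9 / bit 5, i.e. CPUID.7.0:EBX[5], is used only as an example value; it is not defined by this diff):

/*
 * Standalone sketch of the X86_FEATURE_* encoding relied on by
 * __feature_bit() and reverse_cpuid_check(): bits 4:0 are the bit number
 * within a 32-bit CPUID register, the remaining bits are the "word" used
 * to index per-word feature arrays.
 */
#include <stdio.h>

#define X86_FEATURE_AVX2	(9 * 32 + 5)	/* example encoding: word 9, bit 5 */

static unsigned int feature_word(unsigned int f) { return f / 32; }
static unsigned int feature_mask(unsigned int f) { return 1u << (f & 31); }

int main(void)
{
	printf("word = %u\n", feature_word(X86_FEATURE_AVX2));		/* 9 */
	printf("mask = 0x%08x\n", feature_mask(X86_FEATURE_AVX2));	/* 0x00000020 */
	return 0;
}
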
@@ -89,36 +124,112 @@
 	}
 }
 
-static __always_inline bool guest_cpuid_has(struct kvm_vcpu *vcpu, unsigned x86_feature)
+static __always_inline u32 *cpuid_entry_get_reg(struct kvm_cpuid_entry2 *entry,
+						unsigned int x86_feature)
 {
-	int *reg;
+	const struct cpuid_reg cpuid = x86_feature_cpuid(x86_feature);
 
-	if (x86_feature == X86_FEATURE_XSAVE &&
-	    !static_cpu_has(X86_FEATURE_XSAVE))
-		return false;
+	return __cpuid_entry_get_reg(entry, cpuid.reg);
+}
+
+static __always_inline u32 cpuid_entry_get(struct kvm_cpuid_entry2 *entry,
+					   unsigned int x86_feature)
+{
+	u32 *reg = cpuid_entry_get_reg(entry, x86_feature);
+
+	return *reg & __feature_bit(x86_feature);
+}
+
+static __always_inline bool cpuid_entry_has(struct kvm_cpuid_entry2 *entry,
+					    unsigned int x86_feature)
+{
+	return cpuid_entry_get(entry, x86_feature);
+}
+
+static __always_inline void cpuid_entry_clear(struct kvm_cpuid_entry2 *entry,
+					      unsigned int x86_feature)
+{
+	u32 *reg = cpuid_entry_get_reg(entry, x86_feature);
+
+	*reg &= ~__feature_bit(x86_feature);
+}
+
+static __always_inline void cpuid_entry_set(struct kvm_cpuid_entry2 *entry,
+					    unsigned int x86_feature)
+{
+	u32 *reg = cpuid_entry_get_reg(entry, x86_feature);
+
+	*reg |= __feature_bit(x86_feature);
+}
+
+static __always_inline void cpuid_entry_change(struct kvm_cpuid_entry2 *entry,
+					       unsigned int x86_feature,
+					       bool set)
+{
+	u32 *reg = cpuid_entry_get_reg(entry, x86_feature);
+
+	/*
+	 * Open coded instead of using cpuid_entry_{clear,set}() to coerce the
+	 * compiler into using CMOV instead of Jcc when possible.
+	 */
+	if (set)
+		*reg |= __feature_bit(x86_feature);
+	else
+		*reg &= ~__feature_bit(x86_feature);
+}
+
+static __always_inline void cpuid_entry_override(struct kvm_cpuid_entry2 *entry,
+						 enum cpuid_leafs leaf)
+{
+	u32 *reg = cpuid_entry_get_reg(entry, leaf * 32);
+
+	BUILD_BUG_ON(leaf >= ARRAY_SIZE(kvm_cpu_caps));
+	*reg = kvm_cpu_caps[leaf];
+}
+
+static __always_inline u32 *guest_cpuid_get_register(struct kvm_vcpu *vcpu,
+						     unsigned int x86_feature)
+{
+	const struct cpuid_reg cpuid = x86_feature_cpuid(x86_feature);
+	struct kvm_cpuid_entry2 *entry;
+
+	entry = kvm_find_cpuid_entry(vcpu, cpuid.function, cpuid.index);
+	if (!entry)
+		return NULL;
+
+	return __cpuid_entry_get_reg(entry, cpuid.reg);
+}
+
+static __always_inline bool guest_cpuid_has(struct kvm_vcpu *vcpu,
+					    unsigned int x86_feature)
+{
+	u32 *reg;
 
 	reg = guest_cpuid_get_register(vcpu, x86_feature);
 	if (!reg)
 		return false;
 
-	return *reg & bit(x86_feature);
+	return *reg & __feature_bit(x86_feature);
 }
 
-static __always_inline void guest_cpuid_clear(struct kvm_vcpu *vcpu, unsigned x86_feature)
+static __always_inline void guest_cpuid_clear(struct kvm_vcpu *vcpu,
+					      unsigned int x86_feature)
 {
-	int *reg;
+	u32 *reg;
 
 	reg = guest_cpuid_get_register(vcpu, x86_feature);
 	if (reg)
-		*reg &= ~bit(x86_feature);
+		*reg &= ~__feature_bit(x86_feature);
 }
 
-static inline bool guest_cpuid_is_amd(struct kvm_vcpu *vcpu)
+static inline bool guest_cpuid_is_amd_or_hygon(struct kvm_vcpu *vcpu)
 {
 	struct kvm_cpuid_entry2 *best;
 
 	best = kvm_find_cpuid_entry(vcpu, 0, 0);
-	return best && best->ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx;
+	return best &&
+	       (is_guest_vendor_amd(best->ebx, best->ecx, best->edx) ||
+		is_guest_vendor_hygon(best->ebx, best->ecx, best->edx));
 }
 
 static inline int guest_cpuid_family(struct kvm_vcpu *vcpu)
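
The cpuid_entry_*() helpers added in the hunk above pick one of the four output registers of a kvm_cpuid_entry2 and then read, set, or clear the feature's bit in place. A simplified stand-in model of that pattern (not KVM code; the struct, register enum, and example bit are assumptions for illustration):

/*
 * Simplified stand-in for the per-entry accessors: map a feature to one of
 * the four CPUID output registers, then flip its bit in place, mirroring
 * __cpuid_entry_get_reg() plus cpuid_entry_change().
 */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

enum reg { REG_EAX, REG_EBX, REG_ECX, REG_EDX };

struct entry { uint32_t eax, ebx, ecx, edx; };

static uint32_t *entry_get_reg(struct entry *e, enum reg r)
{
	switch (r) {
	case REG_EAX: return &e->eax;
	case REG_EBX: return &e->ebx;
	case REG_ECX: return &e->ecx;
	default:      return &e->edx;
	}
}

static void entry_change(struct entry *e, enum reg r, unsigned int bit, bool set)
{
	uint32_t *reg = entry_get_reg(e, r);

	if (set)
		*reg |= 1u << bit;
	else
		*reg &= ~(1u << bit);
}

int main(void)
{
	struct entry e = { 0 };

	entry_change(&e, REG_EBX, 5, true);	/* e.g. advertise CPUID.7.0:EBX[5] */
	printf("ebx = 0x%08x\n", e.ebx);	/* 0x00000020 */
	return 0;
}
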
@@ -179,4 +290,53 @@
 		  MSR_MISC_FEATURES_ENABLES_CPUID_FAULT;
 }
 
+static __always_inline void kvm_cpu_cap_clear(unsigned int x86_feature)
+{
+	unsigned int x86_leaf = x86_feature / 32;
+
+	reverse_cpuid_check(x86_leaf);
+	kvm_cpu_caps[x86_leaf] &= ~__feature_bit(x86_feature);
+}
+
+static __always_inline void kvm_cpu_cap_set(unsigned int x86_feature)
+{
+	unsigned int x86_leaf = x86_feature / 32;
+
+	reverse_cpuid_check(x86_leaf);
+	kvm_cpu_caps[x86_leaf] |= __feature_bit(x86_feature);
+}
+
+static __always_inline u32 kvm_cpu_cap_get(unsigned int x86_feature)
+{
+	unsigned int x86_leaf = x86_feature / 32;
+
+	reverse_cpuid_check(x86_leaf);
+	return kvm_cpu_caps[x86_leaf] & __feature_bit(x86_feature);
+}
+
+static __always_inline bool kvm_cpu_cap_has(unsigned int x86_feature)
+{
+	return !!kvm_cpu_cap_get(x86_feature);
+}
+
+static __always_inline void kvm_cpu_cap_check_and_set(unsigned int x86_feature)
+{
+	if (boot_cpu_has(x86_feature))
+		kvm_cpu_cap_set(x86_feature);
+}
+
+static inline bool page_address_valid(struct kvm_vcpu *vcpu, gpa_t gpa)
+{
+	return PAGE_ALIGNED(gpa) && !(gpa >> cpuid_maxphyaddr(vcpu));
+}
+
+static __always_inline bool guest_pv_has(struct kvm_vcpu *vcpu,
+					 unsigned int kvm_feature)
+{
+	if (!vcpu->arch.pv_cpuid.enforce)
+		return true;
+
+	return vcpu->arch.pv_cpuid.features & (1u << kvm_feature);
+}
+
 #endif
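
Taken together, the new kvm_cpu_caps[] array and its kvm_cpu_cap_*() accessors let KVM compute the feature mask it is willing to advertise once, then apply it per leaf, e.g. via cpuid_entry_override(). A rough standalone model of that flow (not KVM code; the array size, word index, and feature bit are assumed stand-ins):

/*
 * Rough model of the kvm_cpu_caps flow: seed a global per-word capability
 * mask, force individual bits on or off, then copy the word into the
 * register reported for that leaf, analogous to kvm_cpu_cap_set(),
 * kvm_cpu_cap_clear() and cpuid_entry_override().
 */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define NWORDS		32			/* stand-in for NCAPINTS */
#define WORD_7_0_EBX	9			/* stand-in for the CPUID.7.0:EBX word */
#define FEATURE_AVX2	(WORD_7_0_EBX * 32 + 5)	/* example feature in that word */

static uint32_t cpu_caps[NWORDS];		/* stand-in for kvm_cpu_caps[] */

static uint32_t feature_mask(unsigned int f) { return 1u << (f & 31); }

static void cap_set(unsigned int f)   { cpu_caps[f / 32] |=  feature_mask(f); }
static void cap_clear(unsigned int f) { cpu_caps[f / 32] &= ~feature_mask(f); }
static bool cap_has(unsigned int f)   { return cpu_caps[f / 32] & feature_mask(f); }

int main(void)
{
	uint32_t reported_ebx;

	cap_set(FEATURE_AVX2);		/* host supports it, advertise it */
	cap_clear(FEATURE_AVX2 + 1);	/* never advertise the neighbouring bit */

	/* analogous to cpuid_entry_override(entry, CPUID_7_0_EBX) */
	reported_ebx = cpu_caps[WORD_7_0_EBX];

	printf("AVX2 advertised: %d\n", cap_has(FEATURE_AVX2));	/* 1 */
	printf("reported ebx = 0x%08x\n", reported_ebx);		/* 0x00000020 */
	return 0;
}

Computing the mask once at setup and reusing it per vCPU is the design point of this change: the per-feature host checks move out of the per-entry paths and into a single global table.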