2023-12-11 d2ccde1c8e90d38cee87a1b0309ad2827f3fd30d
kernel/arch/x86/kvm/x86.h
@@ -5,6 +5,7 @@
 #include <linux/kvm_host.h>
 #include <asm/pvclock.h>
 #include "kvm_cache_regs.h"
+#include "kvm_emulate.h"
 
 #define KVM_DEFAULT_PLE_GAP		128
 #define KVM_VMX_DEFAULT_PLE_WINDOW	4096
@@ -96,7 +97,7 @@
 
 	if (!is_long_mode(vcpu))
 		return false;
-	kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
+	kvm_x86_ops.get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
 	return cs_l;
 }
 
@@ -124,6 +125,12 @@
 	return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu;
 }
 
+static inline void kvm_vcpu_flush_tlb_current(struct kvm_vcpu *vcpu)
+{
+	++vcpu->stat.tlb_flush;
+	kvm_x86_ops.tlb_flush_current(vcpu);
+}
+
 static inline int is_pae(struct kvm_vcpu *vcpu)
 {
 	return kvm_read_cr4_bits(vcpu, X86_CR4_PAE);
@@ -144,19 +151,9 @@
 	return !is_long_mode(vcpu) && is_pae(vcpu) && is_paging(vcpu);
 }
 
-static inline u32 bit(int bitno)
-{
-	return 1 << (bitno & 31);
-}
-
 static inline u8 vcpu_virt_addr_bits(struct kvm_vcpu *vcpu)
 {
 	return kvm_read_cr4_bits(vcpu, X86_CR4_LA57) ? 57 : 48;
-}
-
-static inline u8 ctxt_virt_addr_bits(struct x86_emulate_ctxt *ctxt)
-{
-	return (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_LA57) ? 57 : 48;
 }
 
 static inline u64 get_canonical(u64 la, u8 vaddr_bits)
@@ -166,21 +163,7 @@
 
 static inline bool is_noncanonical_address(u64 la, struct kvm_vcpu *vcpu)
 {
-#ifdef CONFIG_X86_64
 	return get_canonical(la, vcpu_virt_addr_bits(vcpu)) != la;
-#else
-	return false;
-#endif
-}
-
-static inline bool emul_is_noncanonical_address(u64 la,
-						struct x86_emulate_ctxt *ctxt)
-{
-#ifdef CONFIG_X86_64
-	return get_canonical(la, ctxt_virt_addr_bits(ctxt)) != la;
-#else
-	return false;
-#endif
 }
 
 static inline void vcpu_cache_mmio_info(struct kvm_vcpu *vcpu,
@@ -188,7 +171,7 @@
 {
 	u64 gen = kvm_memslots(vcpu->kvm)->generation;
 
-	if (unlikely(gen & 1))
+	if (unlikely(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS))
 		return;
 
 	/*
@@ -196,7 +179,7 @@
 	 * actually a nGPA.
 	 */
 	vcpu->arch.mmio_gva = mmu_is_nested(vcpu) ? 0 : gva & PAGE_MASK;
-	vcpu->arch.access = access;
+	vcpu->arch.mmio_access = access;
 	vcpu->arch.mmio_gfn = gfn;
 	vcpu->arch.mmio_gen = gen;
 }
@@ -238,8 +221,7 @@
 	return false;
 }
 
-static inline unsigned long kvm_register_readl(struct kvm_vcpu *vcpu,
-					       enum kvm_reg reg)
+static inline unsigned long kvm_register_readl(struct kvm_vcpu *vcpu, int reg)
 {
 	unsigned long val = kvm_register_read(vcpu, reg);
 
@@ -247,8 +229,7 @@
 }
 
 static inline void kvm_register_writel(struct kvm_vcpu *vcpu,
-				       enum kvm_reg reg,
-				       unsigned long val)
+				       int reg, unsigned long val)
 {
 	if (!is_64_bit_mode(vcpu))
 		val = (u32)val;
@@ -260,8 +241,12 @@
 	return !(kvm->arch.disabled_quirks & quirk);
 }
 
-void kvm_set_pending_timer(struct kvm_vcpu *vcpu);
-int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip);
+static inline bool kvm_vcpu_latch_init(struct kvm_vcpu *vcpu)
+{
+	return is_smm(vcpu) || kvm_x86_ops.apic_init_signal_blocked(vcpu);
+}
+
+void kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip);
 
 void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr);
 u64 get_kvmclock_ns(struct kvm *kvm);
@@ -276,6 +261,8 @@
 
 int handle_ud(struct kvm_vcpu *vcpu);
 
+void kvm_deliver_exception_payload(struct kvm_vcpu *vcpu);
+
 void kvm_vcpu_mtrr_init(struct kvm_vcpu *vcpu);
 u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn);
 bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data);
@@ -284,22 +271,28 @@
 bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn,
 					  int page_num);
 bool kvm_vector_hashing_enabled(void);
+void kvm_fixup_and_inject_pf_error(struct kvm_vcpu *vcpu, gva_t gva, u16 error_code);
+int x86_decode_emulated_instruction(struct kvm_vcpu *vcpu, int emulation_type,
+				    void *insn, int insn_len);
 int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
 			    int emulation_type, void *insn, int insn_len);
+fastpath_t handle_fastpath_set_msr_irqoff(struct kvm_vcpu *vcpu);
 
-#define KVM_SUPPORTED_XCR0     (XFEATURE_MASK_FP | XFEATURE_MASK_SSE \
-				| XFEATURE_MASK_YMM | XFEATURE_MASK_BNDREGS \
-				| XFEATURE_MASK_BNDCSR | XFEATURE_MASK_AVX512 \
-				| XFEATURE_MASK_PKRU)
 extern u64 host_xcr0;
+extern u64 supported_xcr0;
+extern u64 supported_xss;
 
-extern u64 kvm_supported_xcr0(void);
+static inline bool kvm_mpx_supported(void)
+{
+	return (supported_xcr0 & (XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR))
+		== (XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR);
+}
 
 extern unsigned int min_timer_period_us;
 
-extern unsigned int lapic_timer_advance_ns;
-
 extern bool enable_vmware_backdoor;
+
+extern int pi_inject_timer;
 
 extern struct static_key kvm_no_apic_vcpu;
 
@@ -338,6 +331,11 @@
 	return kvm->arch.pause_in_guest;
 }
 
+static inline bool kvm_cstate_in_guest(struct kvm *kvm)
+{
+	return kvm->arch.cstate_in_guest;
+}
+
 DECLARE_PER_CPU(struct kvm_vcpu *, current_vcpu);
 
 static inline void kvm_before_interrupt(struct kvm_vcpu *vcpu)
@@ -359,7 +357,58 @@
 	return (data | ((data & 0x0202020202020202ull) << 1)) == data;
 }
 
-void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu);
-void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu);
+static inline bool kvm_dr7_valid(u64 data)
+{
+	/* Bits [63:32] are reserved */
+	return !(data >> 32);
+}
+static inline bool kvm_dr6_valid(u64 data)
+{
+	/* Bits [63:32] are reserved */
+	return !(data >> 32);
+}
+
+void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu);
+void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu);
+int kvm_spec_ctrl_test_value(u64 value);
+int kvm_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
+bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu);
+int kvm_handle_memory_failure(struct kvm_vcpu *vcpu, int r,
+			      struct x86_exception *e);
+int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva);
+bool kvm_msr_allowed(struct kvm_vcpu *vcpu, u32 index, u32 type);
+
+/*
+ * Internal error codes that are used to indicate that MSR emulation encountered
+ * an error that should result in #GP in the guest, unless userspace
+ * handles it.
+ */
+#define KVM_MSR_RET_INVALID	2	/* in-kernel MSR emulation #GP condition */
+#define KVM_MSR_RET_FILTERED	3	/* #GP due to userspace MSR filter */
+
+#define __cr4_reserved_bits(__cpu_has, __c)		\
+({							\
+	u64 __reserved_bits = CR4_RESERVED_BITS;	\
+							\
+	if (!__cpu_has(__c, X86_FEATURE_XSAVE))		\
+		__reserved_bits |= X86_CR4_OSXSAVE;	\
+	if (!__cpu_has(__c, X86_FEATURE_SMEP))		\
+		__reserved_bits |= X86_CR4_SMEP;	\
+	if (!__cpu_has(__c, X86_FEATURE_SMAP))		\
+		__reserved_bits |= X86_CR4_SMAP;	\
+	if (!__cpu_has(__c, X86_FEATURE_FSGSBASE))	\
+		__reserved_bits |= X86_CR4_FSGSBASE;	\
+	if (!__cpu_has(__c, X86_FEATURE_PKU))		\
+		__reserved_bits |= X86_CR4_PKE;		\
+	if (!__cpu_has(__c, X86_FEATURE_LA57))		\
+		__reserved_bits |= X86_CR4_LA57;	\
+	if (!__cpu_has(__c, X86_FEATURE_UMIP))		\
+		__reserved_bits |= X86_CR4_UMIP;	\
+	if (!__cpu_has(__c, X86_FEATURE_VMX))		\
+		__reserved_bits |= X86_CR4_VMXE;	\
+	if (!__cpu_has(__c, X86_FEATURE_PCID))		\
+		__reserved_bits |= X86_CR4_PCIDE;	\
+	__reserved_bits;				\
+})
 
 #endif