```diff
@@ -5,6 +5,7 @@
 #include <linux/kvm_host.h>
 #include <asm/pvclock.h>
 #include "kvm_cache_regs.h"
+#include "kvm_emulate.h"
 
 #define KVM_DEFAULT_PLE_GAP 128
 #define KVM_VMX_DEFAULT_PLE_WINDOW 4096
```
|---|
```diff
@@ -96,7 +97,7 @@
 
        if (!is_long_mode(vcpu))
                return false;
-       kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
+       kvm_x86_ops.get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
        return cs_l;
 }
 
```
|---|
```diff
@@ -124,6 +125,12 @@
        return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu;
 }
 
+static inline void kvm_vcpu_flush_tlb_current(struct kvm_vcpu *vcpu)
+{
+       ++vcpu->stat.tlb_flush;
+       kvm_x86_ops.tlb_flush_current(vcpu);
+}
+
 static inline int is_pae(struct kvm_vcpu *vcpu)
 {
        return kvm_read_cr4_bits(vcpu, X86_CR4_PAE);
```
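The new wrapper keeps the `tlb_flush` statistic and the vendor flush inseparable, so common code is expected to go through `kvm_vcpu_flush_tlb_current()` rather than calling the hook directly; `tlb_flush_current` is the variant that flushes only the currently active translation context (e.g., the current VPID/ASID) rather than all of the guest's TLB entries. A hedged sketch of the request-driven call pattern this appears intended for; the surrounding function is illustrative:

```c
/* Illustrative: on the way into the guest, service a pending
 * "flush current context" request through the wrapper so that
 * vcpu->stat.tlb_flush stays in sync with actual flushes. */
static void kvm_service_tlb_flush(struct kvm_vcpu *vcpu)
{
	if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu))
		kvm_vcpu_flush_tlb_current(vcpu);
}
```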
|---|
```diff
@@ -144,19 +151,9 @@
        return !is_long_mode(vcpu) && is_pae(vcpu) && is_paging(vcpu);
 }
 
-static inline u32 bit(int bitno)
-{
-       return 1 << (bitno & 31);
-}
-
 static inline u8 vcpu_virt_addr_bits(struct kvm_vcpu *vcpu)
 {
        return kvm_read_cr4_bits(vcpu, X86_CR4_LA57) ? 57 : 48;
-}
-
-static inline u8 ctxt_virt_addr_bits(struct x86_emulate_ctxt *ctxt)
-{
-       return (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_LA57) ? 57 : 48;
 }
 
 static inline u64 get_canonical(u64 la, u8 vaddr_bits)
```
|---|
```diff
@@ -166,21 +163,7 @@
 
 static inline bool is_noncanonical_address(u64 la, struct kvm_vcpu *vcpu)
 {
-#ifdef CONFIG_X86_64
        return get_canonical(la, vcpu_virt_addr_bits(vcpu)) != la;
-#else
-       return false;
-#endif
-}
-
-static inline bool emul_is_noncanonical_address(u64 la,
-                                               struct x86_emulate_ctxt *ctxt)
-{
-#ifdef CONFIG_X86_64
-       return get_canonical(la, ctxt_virt_addr_bits(ctxt)) != la;
-#else
-       return false;
-#endif
 }
 
 static inline void vcpu_cache_mmio_info(struct kvm_vcpu *vcpu,
```
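Dropping the `#ifdef CONFIG_X86_64` guards makes sense once this header is only built for 64-bit hosts (hedged: that is the usual rationale for this cleanup; the emulator-only variant presumably moved out alongside the emulator code). For reference, `get_canonical()` sign-extends the address from its top implemented bit, so an address is canonical iff sign extension leaves it unchanged, with `vcpu_virt_addr_bits()` supplying 57 bits under LA57 (5-level paging) and 48 otherwise. A standalone sketch with worked examples, assuming that sign-extend definition:

```c
#include <stdint.h>
#include <stdio.h>

/* Sign-extend 'la' from 'vaddr_bits' bits: a canonical address
 * replicates bit (vaddr_bits - 1) through bit 63. */
static uint64_t get_canonical(uint64_t la, uint8_t vaddr_bits)
{
	return (uint64_t)((int64_t)(la << (64 - vaddr_bits)) >> (64 - vaddr_bits));
}

static int is_noncanonical(uint64_t la, uint8_t vaddr_bits)
{
	return get_canonical(la, vaddr_bits) != la;
}

int main(void)
{
	/* 48-bit widths (4-level paging): the canonical hole spans
	 * 0x0000800000000000 .. 0xffff7fffffffffff. */
	printf("%d\n", is_noncanonical(0x00007fffffffffffULL, 48)); /* 0 */
	printf("%d\n", is_noncanonical(0x0000800000000000ULL, 48)); /* 1 */
	printf("%d\n", is_noncanonical(0xffff800000000000ULL, 48)); /* 0 */
	/* With LA57 the hole shrinks, so this address becomes legal: */
	printf("%d\n", is_noncanonical(0x0000800000000000ULL, 57)); /* 0 */
	return 0;
}
```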
|---|
```diff
@@ -188,7 +171,7 @@
 {
        u64 gen = kvm_memslots(vcpu->kvm)->generation;
 
-       if (unlikely(gen & 1))
+       if (unlikely(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS))
                return;
 
        /*
```
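Replacing the magic `gen & 1` with `KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS` makes the intent explicit: while a memslot update is in flight the generation carries an in-progress flag, and MMIO info must not be cached because it could be stale the moment the update commits. (In the kernel this flag was later moved to bit 63 of the generation, so the named constant also insulates call sites from the flag's position.) A standalone sketch of the idea, with the bit position as a labeled assumption:

```c
#include <stdint.h>
#include <stdio.h>

/* Assumed layout for illustration: one generation bit is reserved
 * to mark "memslots are being rewritten right now". */
#define MEMSLOT_GEN_UPDATE_IN_PROGRESS (1ULL << 63)

static int caching_allowed(uint64_t gen)
{
	/* Don't cache against a generation that is about to change. */
	return !(gen & MEMSLOT_GEN_UPDATE_IN_PROGRESS);
}

int main(void)
{
	printf("%d\n", caching_allowed(42));                                  /* 1 */
	printf("%d\n", caching_allowed(42 | MEMSLOT_GEN_UPDATE_IN_PROGRESS)); /* 0 */
	return 0;
}
```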
|---|
```diff
@@ -196,7 +179,7 @@
         * actually a nGPA.
         */
        vcpu->arch.mmio_gva = mmu_is_nested(vcpu) ? 0 : gva & PAGE_MASK;
-       vcpu->arch.access = access;
+       vcpu->arch.mmio_access = access;
        vcpu->arch.mmio_gfn = gfn;
        vcpu->arch.mmio_gen = gen;
 }
```
|---|
```diff
@@ -238,8 +221,7 @@
        return false;
 }
 
-static inline unsigned long kvm_register_readl(struct kvm_vcpu *vcpu,
-                                               enum kvm_reg reg)
+static inline unsigned long kvm_register_readl(struct kvm_vcpu *vcpu, int reg)
 {
        unsigned long val = kvm_register_read(vcpu, reg);
 
```
|---|
```diff
@@ -247,8 +229,7 @@
 }
 
 static inline void kvm_register_writel(struct kvm_vcpu *vcpu,
-                                      enum kvm_reg reg,
-                                      unsigned long val)
+                                      int reg, unsigned long val)
 {
        if (!is_64_bit_mode(vcpu))
                val = (u32)val;
```
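Dropping `enum kvm_reg` in favor of plain `int` lets callers pass raw register numbers straight from instruction decode without casts (hedged: that is the apparent motivation for these two signature changes). The `l`-suffixed accessors themselves encode the architectural rule visible in the context above: outside 64-bit mode only the low 32 bits of a GPR are in play, so both directions truncate. A standalone sketch of that truncation semantics:

```c
#include <stdint.h>
#include <stdio.h>

/* Outside 64-bit mode only the low 32 bits of a general-purpose
 * register are architecturally visible, so the accessor truncates. */
static uint64_t register_readl(uint64_t raw, int is_64_bit_mode)
{
	return is_64_bit_mode ? raw : (uint32_t)raw;
}

int main(void)
{
	uint64_t raw = 0x1122334455667788ULL;

	printf("%#llx\n", (unsigned long long)register_readl(raw, 1));
	/* 0x1122334455667788 */
	printf("%#llx\n", (unsigned long long)register_readl(raw, 0));
	/* 0x55667788 */
	return 0;
}
```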
|---|
```diff
@@ -260,8 +241,12 @@
        return !(kvm->arch.disabled_quirks & quirk);
 }
 
-void kvm_set_pending_timer(struct kvm_vcpu *vcpu);
-int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip);
+static inline bool kvm_vcpu_latch_init(struct kvm_vcpu *vcpu)
+{
+       return is_smm(vcpu) || kvm_x86_ops.apic_init_signal_blocked(vcpu);
+}
+
+void kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip);
 
 void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr);
 u64 get_kvmclock_ns(struct kvm *kvm);
```
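`kvm_vcpu_latch_init()` centralizes the SDM rule that an INIT signal is latched, not delivered, while the CPU is in SMM, and lets the vendor module veto delivery as well (on Intel, INIT is also blocked in VMX root operation, which is what `apic_init_signal_blocked` reports). A hedged sketch of how a caller would consult it when processing pending APIC events; the surrounding function is illustrative:

```c
/* Illustrative: an INIT that arrives while latched stays pending
 * instead of being acted on immediately. */
static void process_init_signal(struct kvm_vcpu *vcpu)
{
	if (kvm_vcpu_latch_init(vcpu))
		return;	/* stays pending until the vCPU un-latches */
	/* ...otherwise transition the vCPU to its INIT state... */
}
```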
|---|
```diff
@@ -276,6 +261,8 @@
 
 int handle_ud(struct kvm_vcpu *vcpu);
 
+void kvm_deliver_exception_payload(struct kvm_vcpu *vcpu);
+
 void kvm_vcpu_mtrr_init(struct kvm_vcpu *vcpu);
 u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn);
 bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data);
```
|---|
```diff
@@ -284,22 +271,28 @@
 bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn,
                                           int page_num);
 bool kvm_vector_hashing_enabled(void);
+void kvm_fixup_and_inject_pf_error(struct kvm_vcpu *vcpu, gva_t gva, u16 error_code);
+int x86_decode_emulated_instruction(struct kvm_vcpu *vcpu, int emulation_type,
+                                    void *insn, int insn_len);
 int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
                             int emulation_type, void *insn, int insn_len);
+fastpath_t handle_fastpath_set_msr_irqoff(struct kvm_vcpu *vcpu);
 
-#define KVM_SUPPORTED_XCR0     (XFEATURE_MASK_FP | XFEATURE_MASK_SSE \
-                               | XFEATURE_MASK_YMM | XFEATURE_MASK_BNDREGS \
-                               | XFEATURE_MASK_BNDCSR | XFEATURE_MASK_AVX512 \
-                               | XFEATURE_MASK_PKRU)
 extern u64 host_xcr0;
+extern u64 supported_xcr0;
+extern u64 supported_xss;
 
-extern u64 kvm_supported_xcr0(void);
+static inline bool kvm_mpx_supported(void)
+{
+       return (supported_xcr0 & (XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR))
+               == (XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR);
+}
 
 extern unsigned int min_timer_period_us;
 
-extern unsigned int lapic_timer_advance_ns;
-
 extern bool enable_vmware_backdoor;
+
+extern int pi_inject_timer;
 
 extern struct static_key kvm_no_apic_vcpu;
 
```
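`kvm_mpx_supported()` deliberately tests that *both* MPX xsave components are offered: BNDREGS and BNDCSR can only be exposed as a pair, so the combined mask is compared for equality rather than checked for any overlap. A standalone sketch showing why a plain `!= 0` overlap test would be wrong; the bit positions match the real XCR0 layout but are spelled out here as local defines:

```c
#include <stdint.h>
#include <stdio.h>

/* XFEATURE_MASK_BNDREGS is bit 3 and XFEATURE_MASK_BNDCSR is bit 4
 * in the real XCR0 layout; local names for the sketch. */
#define MASK_BNDREGS (1ULL << 3)
#define MASK_BNDCSR  (1ULL << 4)

static int mpx_supported(uint64_t supported_xcr0)
{
	uint64_t both = MASK_BNDREGS | MASK_BNDCSR;

	/* Equality, not overlap: half-supported MPX must read as off. */
	return (supported_xcr0 & both) == both;
}

int main(void)
{
	printf("%d\n", mpx_supported(MASK_BNDREGS | MASK_BNDCSR)); /* 1 */
	printf("%d\n", mpx_supported(MASK_BNDREGS));               /* 0 */
	printf("%d\n", mpx_supported(0));                          /* 0 */
	return 0;
}
```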
|---|
```diff
@@ -338,6 +331,11 @@
        return kvm->arch.pause_in_guest;
 }
 
+static inline bool kvm_cstate_in_guest(struct kvm *kvm)
+{
+       return kvm->arch.cstate_in_guest;
+}
+
 DECLARE_PER_CPU(struct kvm_vcpu *, current_vcpu);
 
 static inline void kvm_before_interrupt(struct kvm_vcpu *vcpu)
```
|---|
```diff
@@ -359,7 +357,58 @@
        return (data | ((data & 0x0202020202020202ull) << 1)) == data;
 }
 
-void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu);
-void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu);
+static inline bool kvm_dr7_valid(u64 data)
+{
+       /* Bits [63:32] are reserved */
+       return !(data >> 32);
+}
+static inline bool kvm_dr6_valid(u64 data)
+{
+       /* Bits [63:32] are reserved */
+       return !(data >> 32);
+}
+
+void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu);
+void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu);
+int kvm_spec_ctrl_test_value(u64 value);
+int kvm_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
+bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu);
+int kvm_handle_memory_failure(struct kvm_vcpu *vcpu, int r,
+                              struct x86_exception *e);
+int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva);
+bool kvm_msr_allowed(struct kvm_vcpu *vcpu, u32 index, u32 type);
+
+/*
+ * Internal error codes that are used to indicate that MSR emulation encountered
+ * an error that should result in #GP in the guest, unless userspace
+ * handles it.
+ */
+#define KVM_MSR_RET_INVALID    2       /* in-kernel MSR emulation #GP condition */
+#define KVM_MSR_RET_FILTERED   3       /* #GP due to userspace MSR filter */
+
+#define __cr4_reserved_bits(__cpu_has, __c)             \
+({                                                      \
+       u64 __reserved_bits = CR4_RESERVED_BITS;        \
+                                                       \
+       if (!__cpu_has(__c, X86_FEATURE_XSAVE))         \
+               __reserved_bits |= X86_CR4_OSXSAVE;     \
+       if (!__cpu_has(__c, X86_FEATURE_SMEP))          \
+               __reserved_bits |= X86_CR4_SMEP;        \
+       if (!__cpu_has(__c, X86_FEATURE_SMAP))          \
+               __reserved_bits |= X86_CR4_SMAP;        \
+       if (!__cpu_has(__c, X86_FEATURE_FSGSBASE))      \
+               __reserved_bits |= X86_CR4_FSGSBASE;    \
+       if (!__cpu_has(__c, X86_FEATURE_PKU))           \
+               __reserved_bits |= X86_CR4_PKE;         \
+       if (!__cpu_has(__c, X86_FEATURE_LA57))          \
+               __reserved_bits |= X86_CR4_LA57;        \
+       if (!__cpu_has(__c, X86_FEATURE_UMIP))          \
+               __reserved_bits |= X86_CR4_UMIP;        \
+       if (!__cpu_has(__c, X86_FEATURE_VMX))           \
+               __reserved_bits |= X86_CR4_VMXE;        \
+       if (!__cpu_has(__c, X86_FEATURE_PCID))          \
+               __reserved_bits |= X86_CR4_PCIDE;       \
+       __reserved_bits;                                \
+})
 
 #endif
```
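`__cr4_reserved_bits()` is parameterized over the feature probe so the same table can compute reserved CR4 bits for the host CPU (with a `boot_cpu_has`-style probe) and for a guest's CPUID (with a `guest_cpuid_has`-style probe): every feature the probe reports absent turns that feature's CR4 enable bit into a reserved bit. A standalone sketch of the instantiation pattern, with simplified types, only two feature bits, and GNU statement expressions as in the original:

```c
#include <stdint.h>
#include <stdio.h>

#define CR4_RESERVED_BITS 0 /* simplified for the sketch */
#define X86_CR4_OSXSAVE   (1UL << 18)
#define X86_CR4_SMEP      (1UL << 20)

enum { X86_FEATURE_XSAVE, X86_FEATURE_SMEP, NFEATURES };

struct cpu { int feature[NFEATURES]; };

#define cpu_has(c, f) ((c)->feature[(f)])

/* Same shape as the kernel macro: absent feature => reserved bit. */
#define __cr4_reserved_bits(__cpu_has, __c)		\
({							\
	uint64_t __reserved_bits = CR4_RESERVED_BITS;	\
							\
	if (!__cpu_has(__c, X86_FEATURE_XSAVE))		\
		__reserved_bits |= X86_CR4_OSXSAVE;	\
	if (!__cpu_has(__c, X86_FEATURE_SMEP))		\
		__reserved_bits |= X86_CR4_SMEP;	\
	__reserved_bits;				\
})

int main(void)
{
	struct cpu c = { .feature = { [X86_FEATURE_XSAVE] = 1 } };

	/* XSAVE present, SMEP absent => only CR4.SMEP is reserved. */
	printf("%#llx\n",
	       (unsigned long long)__cr4_reserved_bits(cpu_has, &c));
	return 0;
}
```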
|---|