@@ -1392,7 +1392,9 @@
 
 	if (sd->current_vmcb != svm->vmcb) {
 		sd->current_vmcb = svm->vmcb;
-		indirect_branch_prediction_barrier();
+
+		if (!cpu_feature_enabled(X86_FEATURE_IBPB_ON_VMEXIT))
+			indirect_branch_prediction_barrier();
 	}
 	avic_vcpu_load(vcpu, cpu);
 }
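Assembled, the post-patch barrier logic reads as the sketch below. Only the end state of the hunk is shown; the enclosing function (presumably svm_vcpu_load(), inferred from sd->current_vmcb and avic_vcpu_load()) and the rationale comment are assumptions, not wording from the patch.

	if (sd->current_vmcb != svm->vmcb) {
		sd->current_vmcb = svm->vmcb;

		/*
		 * Assumption: when the CPU is set up to issue an IBPB on every
		 * VM-Exit (X86_FEATURE_IBPB_ON_VMEXIT), an explicit barrier on
		 * the VMCB switch is redundant, so it is skipped here.
		 */
		if (!cpu_feature_enabled(X86_FEATURE_IBPB_ON_VMEXIT))
			indirect_branch_prediction_barrier();
	}
	avic_vcpu_load(vcpu, cpu);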
@@ -3374,6 +3376,7 @@
 
 static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu)
 {
+	amd_clear_divider();
 }
 
 static inline void sync_cr8_to_lapic(struct kvm_vcpu *vcpu)
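The resulting callback is a single call; in the sketch below the comment is an assumption about the purpose of amd_clear_divider() (flushing stale divider state before entering the guest), not text from the patch.

static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu)
{
	/*
	 * Presumably clears leftover division state so the guest cannot
	 * observe stale quotient data from an earlier host division by zero.
	 */
	amd_clear_divider();
}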
@@ -3480,8 +3483,14 @@
 
 static fastpath_t svm_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
 {
-	if (to_svm(vcpu)->vmcb->control.exit_code == SVM_EXIT_MSR &&
-	    to_svm(vcpu)->vmcb->control.exit_info_1)
+	struct vmcb_control_area *control = &to_svm(vcpu)->vmcb->control;
+
+	/*
+	 * Note, the next RIP must be provided as SRCU isn't held, i.e. KVM
+	 * can't read guest memory (dereference memslots) to decode the WRMSR.
+	 */
+	if (control->exit_code == SVM_EXIT_MSR && control->exit_info_1 &&
+	    nrips && control->next_rip)
 		return handle_fastpath_set_msr_irqoff(vcpu);
 
 	return EXIT_FASTPATH_NONE;
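Because this hunk interleaves removals and additions, the full post-patch helper is easier to read assembled in one listing. The sketch below only reorders the hunk's own lines into the final form; nothing new is added.

static fastpath_t svm_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
{
	struct vmcb_control_area *control = &to_svm(vcpu)->vmcb->control;

	/*
	 * Note, the next RIP must be provided as SRCU isn't held, i.e. KVM
	 * can't read guest memory (dereference memslots) to decode the WRMSR.
	 */
	if (control->exit_code == SVM_EXIT_MSR && control->exit_info_1 &&
	    nrips && control->next_rip)
		return handle_fastpath_set_msr_irqoff(vcpu);

	return EXIT_FASTPATH_NONE;
}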
@@ -3977,6 +3986,8 @@
 
 static void svm_handle_exit_irqoff(struct kvm_vcpu *vcpu)
 {
+	if (to_svm(vcpu)->vmcb->control.exit_code == SVM_EXIT_INTR)
+		vcpu->arch.at_instruction_boundary = true;
 }
 
 static void svm_sched_in(struct kvm_vcpu *vcpu, int cpu)
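The resulting handler is shown below; the explanatory comment is an assumption about why only SVM_EXIT_INTR sets the flag, not wording from the patch.

static void svm_handle_exit_irqoff(struct kvm_vcpu *vcpu)
{
	/*
	 * An exit for a host external interrupt presumably always lands on a
	 * guest instruction boundary, so record that fact; the flag appears
	 * to be consumed by code that only reports preemption when the vCPU
	 * was stopped at an instruction boundary.
	 */
	if (to_svm(vcpu)->vmcb->control.exit_code == SVM_EXIT_INTR)
		vcpu->arch.at_instruction_boundary = true;
}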