.. | .. |
---|
752 | 752 | static int vmx_set_guest_uret_msr(struct vcpu_vmx *vmx, |
---|
753 | 753 | struct vmx_uret_msr *msr, u64 data) |
---|
754 | 754 | { |
---|
| 755 | + unsigned long flags; |
---|
755 | 756 | int ret = 0; |
---|
756 | 757 | |
---|
757 | 758 | u64 old_msr_data = msr->data; |
---|
758 | 759 | msr->data = data; |
---|
759 | 760 | if (msr - vmx->guest_uret_msrs < vmx->nr_active_uret_msrs) { |
---|
760 | | - preempt_disable(); |
---|
| 761 | + flags = hard_preempt_disable(); |
---|
761 | 762 | ret = kvm_set_user_return_msr(msr->slot, msr->data, msr->mask); |
---|
762 | | - preempt_enable(); |
---|
| 763 | + hard_preempt_enable(flags); |
---|
763 | 764 | if (ret) |
---|
764 | 765 | msr->data = old_msr_data; |
---|
765 | 766 | } |
---|
.. | .. |
---|
1383 | 1384 | #ifdef CONFIG_X86_64 |
---|
/*
 * Return the guest's MSR_KERNEL_GS_BASE value.
 *
 * If the guest state is currently loaded on this CPU, the live MSR is
 * read and the cached copy (vmx->msr_guest_kernel_gs_base) refreshed
 * first; otherwise the cached copy is already current and is returned
 * as-is.
 *
 * NOTE(review): this patch swaps plain preempt_disable() for
 * hard_preempt_disable()/hard_preempt_enable(flags) — presumably the
 * Dovetail/IRQ-pipeline primitives which also keep the out-of-band
 * (companion) core from preempting between the guest_state_loaded
 * check and the rdmsrl(); confirm against the Dovetail documentation.
 */
static u64 vmx_read_guest_kernel_gs_base(struct vcpu_vmx *vmx)
{
	unsigned long flags;

	flags = hard_preempt_disable();
	/*
	 * The flag check and the MSR read must not be separated by a
	 * migration or a preemption that unloads the guest state.
	 */
	if (vmx->guest_state_loaded)
		rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
	hard_preempt_enable(flags);
	return vmx->msr_guest_kernel_gs_base;
}
---|
1392 | 1395 | |
---|
/*
 * Set the guest's MSR_KERNEL_GS_BASE value.
 *
 * If the guest state is currently loaded on this CPU, the hardware MSR
 * is written immediately; in either case the cached copy
 * (vmx->msr_guest_kernel_gs_base) is updated so a later state load
 * picks up the new value.
 *
 * NOTE(review): as with the read side, hard_preempt_disable()/
 * hard_preempt_enable(flags) replace plain preempt_disable() here —
 * presumably Dovetail/IRQ-pipeline primitives that also fend off
 * out-of-band preemption across the guest_state_loaded check and the
 * wrmsrl(); confirm against the Dovetail documentation.
 */
static void vmx_write_guest_kernel_gs_base(struct vcpu_vmx *vmx, u64 data)
{
	unsigned long flags;

	flags = hard_preempt_disable();
	/*
	 * The flag check and the MSR write must execute on the same CPU
	 * without the guest state being unloaded in between.
	 */
	if (vmx->guest_state_loaded)
		wrmsrl(MSR_KERNEL_GS_BASE, data);
	hard_preempt_enable(flags);
	vmx->msr_guest_kernel_gs_base = data;
}
---|
1401 | 1406 | #endif |
---|
.. | .. |
---|
1795 | 1800 | */ |
---|
1796 | 1801 | static void setup_msrs(struct vcpu_vmx *vmx) |
---|
1797 | 1802 | { |
---|
| 1803 | + hard_cond_local_irq_disable(); |
---|
1798 | 1804 | vmx->guest_uret_msrs_loaded = false; |
---|
1799 | 1805 | vmx->nr_active_uret_msrs = 0; |
---|
1800 | 1806 | #ifdef CONFIG_X86_64 |
---|
.. | .. |
---|
1815 | 1821 | vmx_setup_uret_msr(vmx, MSR_TSC_AUX); |
---|
1816 | 1822 | |
---|
1817 | 1823 | vmx_setup_uret_msr(vmx, MSR_IA32_TSX_CTRL); |
---|
| 1824 | + hard_cond_local_irq_enable(); |
---|
1818 | 1825 | |
---|
1819 | 1826 | if (cpu_has_vmx_msr_bitmap()) |
---|
1820 | 1827 | vmx_update_msr_bitmap(&vmx->vcpu); |
---|
.. | .. |
---|
2050 | 2057 | u32 msr_index = msr_info->index; |
---|
2051 | 2058 | u64 data = msr_info->data; |
---|
2052 | 2059 | u32 index; |
---|
| 2060 | + unsigned long flags; |
---|
2053 | 2061 | |
---|
2054 | 2062 | switch (msr_index) { |
---|
2055 | 2063 | case MSR_EFER: |
---|
.. | .. |
---|
2289 | 2297 | |
---|
2290 | 2298 | default: |
---|
2291 | 2299 | find_uret_msr: |
---|
| 2300 | + /* |
---|
| 2301 | + * Guest MSRs may be activated independently of |
---|
| 2302 | + * vcpu_run(): rely on the notifier for restoring them |
---|
| 2303 | + * upon preemption by the companion core, right before |
---|
| 2304 | + * the current CPU switches to out-of-band scheduling |
---|
| 2305 | + * (see dovetail_context_switch()). |
---|
| 2306 | + */ |
---|
2292 | 2307 | msr = vmx_find_uret_msr(vmx, msr_index); |
---|
2293 | | - if (msr) |
---|
| 2308 | + if (msr) { |
---|
| 2309 | + flags = hard_cond_local_irq_save(); |
---|
| 2310 | + inband_enter_guest(vcpu); |
---|
2294 | 2311 | ret = vmx_set_guest_uret_msr(vmx, msr, data); |
---|
2295 | | - else |
---|
| 2312 | + hard_cond_local_irq_restore(flags); |
---|
| 2313 | + } else { |
---|
2296 | 2314 | ret = kvm_set_msr_common(vcpu, msr_info); |
---|
| 2315 | + } |
---|
2297 | 2316 | } |
---|
2298 | 2317 | |
---|
2299 | 2318 | /* FB_CLEAR may have changed, also update the FB_CLEAR_DIS behavior */ |
---|
.. | .. |
---|
7056 | 7075 | vmx_vcpu_load(vcpu, cpu); |
---|
7057 | 7076 | vcpu->cpu = cpu; |
---|
7058 | 7077 | init_vmcs(vmx); |
---|
| 7078 | + hard_cond_local_irq_disable(); |
---|
7059 | 7079 | vmx_vcpu_put(vcpu); |
---|
| 7080 | + hard_cond_local_irq_enable(); |
---|
7060 | 7081 | put_cpu(); |
---|
7061 | 7082 | if (cpu_need_virtualize_apic_accesses(vcpu)) { |
---|
7062 | 7083 | err = alloc_apic_access_page(vcpu->kvm); |
---|