@@ -2,27 +2,85 @@
 #ifndef ASM_KVM_CACHE_REGS_H
 #define ASM_KVM_CACHE_REGS_H
 
+#include <linux/kvm_host.h>
+
 #define KVM_POSSIBLE_CR0_GUEST_BITS X86_CR0_TS
 #define KVM_POSSIBLE_CR4_GUEST_BITS				  \
 	(X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR \
-	 | X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_PGE | X86_CR4_TSD)
+	 | X86_CR4_OSXMMEXCPT | X86_CR4_PGE | X86_CR4_TSD | X86_CR4_FSGSBASE)
 
-static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu,
-					      enum kvm_reg reg)
+#define BUILD_KVM_GPR_ACCESSORS(lname, uname)				      \
+static __always_inline unsigned long kvm_##lname##_read(struct kvm_vcpu *vcpu)\
+{									      \
+	return vcpu->arch.regs[VCPU_REGS_##uname];			      \
+}									      \
+static __always_inline void kvm_##lname##_write(struct kvm_vcpu *vcpu,	      \
+						unsigned long val)	      \
+{									      \
+	vcpu->arch.regs[VCPU_REGS_##uname] = val;			      \
+}
+BUILD_KVM_GPR_ACCESSORS(rax, RAX)
+BUILD_KVM_GPR_ACCESSORS(rbx, RBX)
+BUILD_KVM_GPR_ACCESSORS(rcx, RCX)
+BUILD_KVM_GPR_ACCESSORS(rdx, RDX)
+BUILD_KVM_GPR_ACCESSORS(rbp, RBP)
+BUILD_KVM_GPR_ACCESSORS(rsi, RSI)
+BUILD_KVM_GPR_ACCESSORS(rdi, RDI)
+#ifdef CONFIG_X86_64
+BUILD_KVM_GPR_ACCESSORS(r8, R8)
+BUILD_KVM_GPR_ACCESSORS(r9, R9)
+BUILD_KVM_GPR_ACCESSORS(r10, R10)
+BUILD_KVM_GPR_ACCESSORS(r11, R11)
+BUILD_KVM_GPR_ACCESSORS(r12, R12)
+BUILD_KVM_GPR_ACCESSORS(r13, R13)
+BUILD_KVM_GPR_ACCESSORS(r14, R14)
+BUILD_KVM_GPR_ACCESSORS(r15, R15)
+#endif
+
+static inline bool kvm_register_is_available(struct kvm_vcpu *vcpu,
+					     enum kvm_reg reg)
 {
-	if (!test_bit(reg, (unsigned long *)&vcpu->arch.regs_avail))
-		kvm_x86_ops->cache_reg(vcpu, reg);
+	return test_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
+}
+
+static inline bool kvm_register_is_dirty(struct kvm_vcpu *vcpu,
+					 enum kvm_reg reg)
+{
+	return test_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
+}
+
+static inline void kvm_register_mark_available(struct kvm_vcpu *vcpu,
+					       enum kvm_reg reg)
+{
+	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
+}
+
+static inline void kvm_register_mark_dirty(struct kvm_vcpu *vcpu,
+					   enum kvm_reg reg)
+{
+	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
+	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
+}
+
+static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu, int reg)
+{
+	if (WARN_ON_ONCE((unsigned int)reg >= NR_VCPU_REGS))
+		return 0;
+
+	if (!kvm_register_is_available(vcpu, reg))
+		kvm_x86_ops.cache_reg(vcpu, reg);
 
 	return vcpu->arch.regs[reg];
 }
 
-static inline void kvm_register_write(struct kvm_vcpu *vcpu,
-				      enum kvm_reg reg,
+static inline void kvm_register_write(struct kvm_vcpu *vcpu, int reg,
 				      unsigned long val)
 {
+	if (WARN_ON_ONCE((unsigned int)reg >= NR_VCPU_REGS))
+		return;
+
 	vcpu->arch.regs[reg] = val;
-	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
-	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
+	kvm_register_mark_dirty(vcpu, reg);
 }
 
 static inline unsigned long kvm_rip_read(struct kvm_vcpu *vcpu)
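For reference, here is what a single instantiation of the accessor-generating macro above, BUILD_KVM_GPR_ACCESSORS(rax, RAX), expands to after preprocessing (a sketch for illustration, not a line of the patch itself). The generated helpers index vcpu->arch.regs[] directly and bypass the regs_avail/regs_dirty bookkeeping that kvm_register_read()/kvm_register_write() perform:

static __always_inline unsigned long kvm_rax_read(struct kvm_vcpu *vcpu)
{
	/* Direct array access: no availability check, no .cache_reg() call. */
	return vcpu->arch.regs[VCPU_REGS_RAX];
}
static __always_inline void kvm_rax_write(struct kvm_vcpu *vcpu,
					  unsigned long val)
{
	/* Direct store: the register is not marked dirty here. */
	vcpu->arch.regs[VCPU_REGS_RAX] = val;
}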
@@ -35,13 +93,22 @@
 	kvm_register_write(vcpu, VCPU_REGS_RIP, val);
 }
 
+static inline unsigned long kvm_rsp_read(struct kvm_vcpu *vcpu)
+{
+	return kvm_register_read(vcpu, VCPU_REGS_RSP);
+}
+
+static inline void kvm_rsp_write(struct kvm_vcpu *vcpu, unsigned long val)
+{
+	kvm_register_write(vcpu, VCPU_REGS_RSP, val);
+}
+
 static inline u64 kvm_pdptr_read(struct kvm_vcpu *vcpu, int index)
 {
 	might_sleep();  /* on svm */
 
-	if (!test_bit(VCPU_EXREG_PDPTR,
-		      (unsigned long *)&vcpu->arch.regs_avail))
-		kvm_x86_ops->cache_reg(vcpu, (enum kvm_reg)VCPU_EXREG_PDPTR);
+	if (!kvm_register_is_available(vcpu, VCPU_EXREG_PDPTR))
+		kvm_x86_ops.cache_reg(vcpu, VCPU_EXREG_PDPTR);
 
 	return vcpu->arch.walk_mmu->pdptrs[index];
 }
@@ -49,8 +116,9 @@
 static inline ulong kvm_read_cr0_bits(struct kvm_vcpu *vcpu, ulong mask)
 {
 	ulong tmask = mask & KVM_POSSIBLE_CR0_GUEST_BITS;
-	if (tmask & vcpu->arch.cr0_guest_owned_bits)
-		kvm_x86_ops->decache_cr0_guest_bits(vcpu);
+	if ((tmask & vcpu->arch.cr0_guest_owned_bits) &&
+	    !kvm_register_is_available(vcpu, VCPU_EXREG_CR0))
+		kvm_x86_ops.cache_reg(vcpu, VCPU_EXREG_CR0);
 	return vcpu->arch.cr0 & mask;
 }
 
@@ -62,15 +130,16 @@
 static inline ulong kvm_read_cr4_bits(struct kvm_vcpu *vcpu, ulong mask)
 {
 	ulong tmask = mask & KVM_POSSIBLE_CR4_GUEST_BITS;
-	if (tmask & vcpu->arch.cr4_guest_owned_bits)
-		kvm_x86_ops->decache_cr4_guest_bits(vcpu);
+	if ((tmask & vcpu->arch.cr4_guest_owned_bits) &&
+	    !kvm_register_is_available(vcpu, VCPU_EXREG_CR4))
+		kvm_x86_ops.cache_reg(vcpu, VCPU_EXREG_CR4);
 	return vcpu->arch.cr4 & mask;
 }
 
 static inline ulong kvm_read_cr3(struct kvm_vcpu *vcpu)
 {
-	if (!test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail))
-		kvm_x86_ops->decache_cr3(vcpu);
+	if (!kvm_register_is_available(vcpu, VCPU_EXREG_CR3))
+		kvm_x86_ops.cache_reg(vcpu, VCPU_EXREG_CR3);
 	return vcpu->arch.cr3;
 }
 
@@ -81,8 +150,8 @@
 
 static inline u64 kvm_read_edx_eax(struct kvm_vcpu *vcpu)
 {
-	return (kvm_register_read(vcpu, VCPU_REGS_RAX) & -1u)
-		| ((u64)(kvm_register_read(vcpu, VCPU_REGS_RDX) & -1u) << 32);
+	return (kvm_rax_read(vcpu) & -1u)
+		| ((u64)(kvm_rdx_read(vcpu) & -1u) << 32);
 }
 
 static inline void enter_guest_mode(struct kvm_vcpu *vcpu)
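A hedged usage sketch (hypothetical helper name, not taken from this diff): the generated per-register accessors pair naturally with kvm_read_edx_eax() above when an emulated instruction returns a 64-bit value to the guest in EDX:EAX, e.g. an RDMSR-style exit handler could write the result back as:

/* Illustration only: split a 64-bit result into EDX:EAX for the guest. */
static inline void example_write_edx_eax(struct kvm_vcpu *vcpu, u64 val)
{
	kvm_rax_write(vcpu, (u32)val);		/* low 32 bits -> RAX/EAX  */
	kvm_rdx_write(vcpu, (u32)(val >> 32));	/* high 32 bits -> RDX/EDX */
}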