...
 #include "x86.h"
 #include <asm/cpu.h>
 #include <asm/processor.h>
+#include <uapi/asm/kvm_para.h>
 
-int kvm_update_cpuid(struct kvm_vcpu *vcpu);
-bool kvm_mpx_supported(void);
+extern u32 kvm_cpu_caps[NCAPINTS] __read_mostly;
+void kvm_set_cpu_caps(void);
+
+void kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu);
+void kvm_update_pv_runtime(struct kvm_vcpu *vcpu);
 struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
 					      u32 function, u32 index);
 int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
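kvm_cpu_caps[] gives KVM a module-wide view of the features it can expose to guests, one u32 of feature bits per CPUID word (the same layout as the kernel's x86_capability words), seeded once by kvm_set_cpu_caps(). A minimal sketch of the intended flow, assuming a vendor hardware-setup call site that is not part of this hunk (the kvm_cpu_cap_*() accessors appear further down in the header):

	kvm_set_cpu_caps();	/* seed kvm_cpu_caps from host (boot_cpu_data) support */
	if (!kvm_cpu_cap_has(X86_FEATURE_XSAVE))
		pr_info("XSAVE will not be exposed to guests\n");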
...
 					    struct kvm_cpuid2 *cpuid,
 					    struct kvm_cpuid_entry2 __user *entries);
 bool kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx,
-	       u32 *ecx, u32 *edx, bool check_limit);
+	       u32 *ecx, u32 *edx, bool exact_only);
 
 int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu);
 
 static inline int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
 {
 	return vcpu->arch.maxphyaddr;
+}
+
+static inline bool kvm_vcpu_is_illegal_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
+{
+	return (gpa >= BIT_ULL(cpuid_maxphyaddr(vcpu)));
 }
 
 struct cpuid_reg {
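kvm_vcpu_is_illegal_gpa() centralizes the "is this guest physical address above the guest's MAXPHYADDR" check. A minimal sketch of a caller, assuming the common KVM convention that a non-zero return fails the emulated operation (the call site is illustrative, not part of this patch):

	if (kvm_vcpu_is_illegal_gpa(vcpu, gpa))
		return 1;	/* GPA uses bits above the guest's MAXPHYADDR */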
...
 	[CPUID_7_ECX]         = {         7, 0, CPUID_ECX},
 	[CPUID_8000_0007_EBX] = {0x80000007, 0, CPUID_EBX},
 	[CPUID_7_EDX]         = {         7, 0, CPUID_EDX},
+	[CPUID_7_1_EAX]       = {         7, 1, CPUID_EAX},
 };
 
-static __always_inline struct cpuid_reg x86_feature_cpuid(unsigned x86_feature)
+/*
+ * Reverse CPUID and its derivatives can only be used for hardware-defined
+ * feature words, i.e. words whose bits directly correspond to a CPUID leaf.
+ * Retrieving a feature bit or masking guest CPUID from a Linux-defined word
+ * is nonsensical as the bit number/mask is an arbitrary software-defined value
+ * and can't be used by KVM to query/control guest capabilities.  And obviously
+ * the leaf being queried must have an entry in the lookup table.
+ */
+static __always_inline void reverse_cpuid_check(unsigned int x86_leaf)
 {
-	unsigned x86_leaf = x86_feature / 32;
-
+	BUILD_BUG_ON(x86_leaf == CPUID_LNX_1);
+	BUILD_BUG_ON(x86_leaf == CPUID_LNX_2);
+	BUILD_BUG_ON(x86_leaf == CPUID_LNX_3);
+	BUILD_BUG_ON(x86_leaf == CPUID_LNX_4);
 	BUILD_BUG_ON(x86_leaf >= ARRAY_SIZE(reverse_cpuid));
 	BUILD_BUG_ON(reverse_cpuid[x86_leaf].function == 0);
+}
 
+/*
+ * Retrieve the bit mask from an X86_FEATURE_* definition.  Features contain
+ * the hardware defined bit number (stored in bits 4:0) and a software defined
+ * "word" (stored in bits 31:5).  The word is used to index into arrays of
+ * bit masks that hold the per-cpu feature capabilities, e.g. this_cpu_has().
+ */
+static __always_inline u32 __feature_bit(int x86_feature)
+{
+	reverse_cpuid_check(x86_feature / 32);
+	return 1 << (x86_feature & 31);
+}
+
+#define feature_bit(name)  __feature_bit(X86_FEATURE_##name)
+
+static __always_inline struct cpuid_reg x86_feature_cpuid(unsigned int x86_feature)
+{
+	unsigned int x86_leaf = x86_feature / 32;
+
+	reverse_cpuid_check(x86_leaf);
 	return reverse_cpuid[x86_leaf];
 }
 
-static __always_inline int *guest_cpuid_get_register(struct kvm_vcpu *vcpu, unsigned x86_feature)
+static __always_inline u32 *__cpuid_entry_get_reg(struct kvm_cpuid_entry2 *entry,
+						  u32 reg)
 {
-	struct kvm_cpuid_entry2 *entry;
-	const struct cpuid_reg cpuid = x86_feature_cpuid(x86_feature);
-
-	entry = kvm_find_cpuid_entry(vcpu, cpuid.function, cpuid.index);
-	if (!entry)
-		return NULL;
-
-	switch (cpuid.reg) {
+	switch (reg) {
 	case CPUID_EAX:
 		return &entry->eax;
 	case CPUID_EBX:
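An X86_FEATURE_* constant encodes word * 32 + bit, which is why the helpers above need only a divide, a mask, and a table lookup. A worked example, assuming the standard cpufeatures.h definition of X86_FEATURE_XSAVE as (4*32 + 26), i.e. word CPUID_1_ECX, bit 26:

	u32 mask = feature_bit(XSAVE);	/* __feature_bit(4*32 + 26) == BIT(26) */
	struct cpuid_reg cr = x86_feature_cpuid(X86_FEATURE_XSAVE);
					/* {.function = 1, .index = 0, .reg = CPUID_ECX} */

A Linux-defined word such as CPUID_LNX_1 would trip the BUILD_BUG_ON()s in reverse_cpuid_check(), since there is no hardware leaf to map it back to.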
...
 	}
 }
 
-static __always_inline bool guest_cpuid_has(struct kvm_vcpu *vcpu, unsigned x86_feature)
+static __always_inline u32 *cpuid_entry_get_reg(struct kvm_cpuid_entry2 *entry,
+						unsigned int x86_feature)
 {
-	int *reg;
+	const struct cpuid_reg cpuid = x86_feature_cpuid(x86_feature);
 
-	if (x86_feature == X86_FEATURE_XSAVE &&
-	    !static_cpu_has(X86_FEATURE_XSAVE))
-		return false;
+	return __cpuid_entry_get_reg(entry, cpuid.reg);
+}
+
+static __always_inline u32 cpuid_entry_get(struct kvm_cpuid_entry2 *entry,
+					   unsigned int x86_feature)
+{
+	u32 *reg = cpuid_entry_get_reg(entry, x86_feature);
+
+	return *reg & __feature_bit(x86_feature);
+}
+
+static __always_inline bool cpuid_entry_has(struct kvm_cpuid_entry2 *entry,
+					    unsigned int x86_feature)
+{
+	return cpuid_entry_get(entry, x86_feature);
+}
+
+static __always_inline void cpuid_entry_clear(struct kvm_cpuid_entry2 *entry,
+					      unsigned int x86_feature)
+{
+	u32 *reg = cpuid_entry_get_reg(entry, x86_feature);
+
+	*reg &= ~__feature_bit(x86_feature);
+}
+
+static __always_inline void cpuid_entry_set(struct kvm_cpuid_entry2 *entry,
+					    unsigned int x86_feature)
+{
+	u32 *reg = cpuid_entry_get_reg(entry, x86_feature);
+
+	*reg |= __feature_bit(x86_feature);
+}
+
+static __always_inline void cpuid_entry_change(struct kvm_cpuid_entry2 *entry,
+					       unsigned int x86_feature,
+					       bool set)
+{
+	u32 *reg = cpuid_entry_get_reg(entry, x86_feature);
+
+	/*
+	 * Open coded instead of using cpuid_entry_{clear,set}() to coerce the
+	 * compiler into using CMOV instead of Jcc when possible.
+	 */
+	if (set)
+		*reg |= __feature_bit(x86_feature);
+	else
+		*reg &= ~__feature_bit(x86_feature);
+}
+
+static __always_inline void cpuid_entry_override(struct kvm_cpuid_entry2 *entry,
+						 enum cpuid_leafs leaf)
+{
+	u32 *reg = cpuid_entry_get_reg(entry, leaf * 32);
+
+	BUILD_BUG_ON(leaf >= ARRAY_SIZE(kvm_cpu_caps));
+	*reg = kvm_cpu_caps[leaf];
+}
+
+static __always_inline u32 *guest_cpuid_get_register(struct kvm_vcpu *vcpu,
+						     unsigned int x86_feature)
+{
+	const struct cpuid_reg cpuid = x86_feature_cpuid(x86_feature);
+	struct kvm_cpuid_entry2 *entry;
+
+	entry = kvm_find_cpuid_entry(vcpu, cpuid.function, cpuid.index);
+	if (!entry)
+		return NULL;
+
+	return __cpuid_entry_get_reg(entry, cpuid.reg);
+}
+
+static __always_inline bool guest_cpuid_has(struct kvm_vcpu *vcpu,
+					    unsigned int x86_feature)
+{
+	u32 *reg;
 
 	reg = guest_cpuid_get_register(vcpu, x86_feature);
 	if (!reg)
 		return false;
 
-	return *reg & bit(x86_feature);
+	return *reg & __feature_bit(x86_feature);
 }
 
-static __always_inline void guest_cpuid_clear(struct kvm_vcpu *vcpu, unsigned x86_feature)
+static __always_inline void guest_cpuid_clear(struct kvm_vcpu *vcpu,
+					      unsigned int x86_feature)
 {
-	int *reg;
+	u32 *reg;
 
 	reg = guest_cpuid_get_register(vcpu, x86_feature);
 	if (reg)
-		*reg &= ~bit(x86_feature);
+		*reg &= ~__feature_bit(x86_feature);
 }
 
-static inline bool guest_cpuid_is_amd(struct kvm_vcpu *vcpu)
+static inline bool guest_cpuid_is_amd_or_hygon(struct kvm_vcpu *vcpu)
 {
 	struct kvm_cpuid_entry2 *best;
 
 	best = kvm_find_cpuid_entry(vcpu, 0, 0);
-	return best && best->ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx;
+	return best &&
+	       (is_guest_vendor_amd(best->ebx, best->ecx, best->edx) ||
+		is_guest_vendor_hygon(best->ebx, best->ecx, best->edx));
 }
 
 static inline int guest_cpuid_family(struct kvm_vcpu *vcpu)
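The cpuid_entry_*() helpers operate on a single struct kvm_cpuid_entry2, e.g. while KVM builds the KVM_GET_SUPPORTED_CPUID output, and cpuid_entry_override() stamps a whole register with the corresponding kvm_cpu_caps word. A rough sketch of how the override is meant to be used for a CPUID.0x7.0 entry (the real call sites live in cpuid.c and are not reproduced here):

	cpuid_entry_override(entry, CPUID_7_0_EBX);
	cpuid_entry_override(entry, CPUID_7_ECX);
	cpuid_entry_override(entry, CPUID_7_EDX);

cpuid_entry_change() is the read-modify-write variant for bits that depend on runtime state; as its comment notes, it is open coded so the compiler can use CMOV rather than a branch.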
...
 	       MSR_MISC_FEATURES_ENABLES_CPUID_FAULT;
 }
 
+static __always_inline void kvm_cpu_cap_clear(unsigned int x86_feature)
+{
+	unsigned int x86_leaf = x86_feature / 32;
+
+	reverse_cpuid_check(x86_leaf);
+	kvm_cpu_caps[x86_leaf] &= ~__feature_bit(x86_feature);
+}
+
+static __always_inline void kvm_cpu_cap_set(unsigned int x86_feature)
+{
+	unsigned int x86_leaf = x86_feature / 32;
+
+	reverse_cpuid_check(x86_leaf);
+	kvm_cpu_caps[x86_leaf] |= __feature_bit(x86_feature);
+}
+
+static __always_inline u32 kvm_cpu_cap_get(unsigned int x86_feature)
+{
+	unsigned int x86_leaf = x86_feature / 32;
+
+	reverse_cpuid_check(x86_leaf);
+	return kvm_cpu_caps[x86_leaf] & __feature_bit(x86_feature);
+}
+
+static __always_inline bool kvm_cpu_cap_has(unsigned int x86_feature)
+{
+	return !!kvm_cpu_cap_get(x86_feature);
+}
+
+static __always_inline void kvm_cpu_cap_check_and_set(unsigned int x86_feature)
+{
+	if (boot_cpu_has(x86_feature))
+		kvm_cpu_cap_set(x86_feature);
+}
+
+static inline bool page_address_valid(struct kvm_vcpu *vcpu, gpa_t gpa)
+{
+	return PAGE_ALIGNED(gpa) && !(gpa >> cpuid_maxphyaddr(vcpu));
+}
+
+static __always_inline bool guest_pv_has(struct kvm_vcpu *vcpu,
+					 unsigned int kvm_feature)
+{
+	if (!vcpu->arch.pv_cpuid.enforce)
+		return true;
+
+	return vcpu->arch.pv_cpuid.features & (1u << kvm_feature);
+}
+
 #endif
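The kvm_cpu_cap_*() accessors are the module-wide counterpart of guest_cpuid_has(): they ask whether KVM can support a feature at all, independent of any vCPU, while guest_pv_has() gates KVM_FEATURE_* paravirt bits only when userspace has opted in to CPUID enforcement (vcpu->arch.pv_cpuid.enforce). Minimal sketches of typical callers, assuming conventional call sites that are not part of this hunk:

	/* Vendor setup: advertise INVPCID only when the host CPU has it. */
	kvm_cpu_cap_check_and_set(X86_FEATURE_INVPCID);

	/* PV MSR handler: respect enforcement of the guest's KVM_CPUID_FEATURES leaf. */
	if (!guest_pv_has(vcpu, KVM_FEATURE_PV_EOI))
		return 1;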