```diff
@@ -25,39 +25,94 @@
 #include <linux/init.h>
 #include <linux/export.h>
 #include <linux/clocksource.h>
+#include <linux/cpu.h>
+#include <linux/reboot.h>
 #include <asm/div64.h>
 #include <asm/x86_init.h>
 #include <asm/hypervisor.h>
 #include <asm/timer.h>
 #include <asm/apic.h>
+#include <asm/vmware.h>
+#include <asm/svm.h>
 
 #undef pr_fmt
 #define pr_fmt(fmt)	"vmware: " fmt
 
-#define CPUID_VMWARE_INFO_LEAF	0x40000000
-#define VMWARE_HYPERVISOR_MAGIC	0x564D5868
-#define VMWARE_HYPERVISOR_PORT	0x5658
+#define CPUID_VMWARE_INFO_LEAF               0x40000000
+#define CPUID_VMWARE_FEATURES_LEAF           0x40000010
+#define CPUID_VMWARE_FEATURES_ECX_VMMCALL    BIT(0)
+#define CPUID_VMWARE_FEATURES_ECX_VMCALL     BIT(1)
 
-#define VMWARE_PORT_CMD_GETVERSION	10
-#define VMWARE_PORT_CMD_GETHZ		45
-#define VMWARE_PORT_CMD_GETVCPU_INFO	68
-#define VMWARE_PORT_CMD_LEGACY_X2APIC	3
-#define VMWARE_PORT_CMD_VCPU_RESERVED	31
+#define VMWARE_HYPERVISOR_MAGIC	0x564D5868
+
+#define VMWARE_CMD_GETVERSION    10
+#define VMWARE_CMD_GETHZ         45
+#define VMWARE_CMD_GETVCPU_INFO  68
+#define VMWARE_CMD_LEGACY_X2APIC  3
+#define VMWARE_CMD_VCPU_RESERVED 31
+#define VMWARE_CMD_STEALCLOCK    91
+
+#define STEALCLOCK_NOT_AVAILABLE (-1)
+#define STEALCLOCK_DISABLED        0
+#define STEALCLOCK_ENABLED         1
 
 #define VMWARE_PORT(cmd, eax, ebx, ecx, edx)				\
 	__asm__("inl (%%dx), %%eax" :					\
-		"=a"(eax), "=c"(ecx), "=d"(edx), "=b"(ebx) :		\
-		"0"(VMWARE_HYPERVISOR_MAGIC),				\
-		"1"(VMWARE_PORT_CMD_##cmd),				\
-		"2"(VMWARE_HYPERVISOR_PORT), "3"(UINT_MAX) :		\
-		"memory");
+		"=a"(eax), "=c"(ecx), "=d"(edx), "=b"(ebx) :		\
+		"a"(VMWARE_HYPERVISOR_MAGIC),				\
+		"c"(VMWARE_CMD_##cmd),					\
+		"d"(VMWARE_HYPERVISOR_PORT), "b"(UINT_MAX) :		\
+		"memory")
+
+#define VMWARE_VMCALL(cmd, eax, ebx, ecx, edx)				\
+	__asm__("vmcall" :						\
+		"=a"(eax), "=c"(ecx), "=d"(edx), "=b"(ebx) :		\
+		"a"(VMWARE_HYPERVISOR_MAGIC),				\
+		"c"(VMWARE_CMD_##cmd),					\
+		"d"(0), "b"(UINT_MAX) :					\
+		"memory")
+
+#define VMWARE_VMMCALL(cmd, eax, ebx, ecx, edx)				\
+	__asm__("vmmcall" :						\
+		"=a"(eax), "=c"(ecx), "=d"(edx), "=b"(ebx) :		\
+		"a"(VMWARE_HYPERVISOR_MAGIC),				\
+		"c"(VMWARE_CMD_##cmd),					\
+		"d"(0), "b"(UINT_MAX) :					\
+		"memory")
+
+#define VMWARE_CMD(cmd, eax, ebx, ecx, edx) do {		\
+	switch (vmware_hypercall_mode) {			\
+	case CPUID_VMWARE_FEATURES_ECX_VMCALL:			\
+		VMWARE_VMCALL(cmd, eax, ebx, ecx, edx);		\
+		break;						\
+	case CPUID_VMWARE_FEATURES_ECX_VMMCALL:		\
+		VMWARE_VMMCALL(cmd, eax, ebx, ecx, edx);	\
+		break;						\
+	default:						\
+		VMWARE_PORT(cmd, eax, ebx, ecx, edx);		\
+		break;						\
+	}							\
+	} while (0)
+
+struct vmware_steal_time {
+	union {
+		uint64_t clock;	/* stolen time counter in units of vtsc */
+		struct {
+			/* only for little-endian */
+			uint32_t clock_low;
+			uint32_t clock_high;
+		};
+	};
+	uint64_t reserved[7];
+};
 
 static unsigned long vmware_tsc_khz __ro_after_init;
+static u8 vmware_hypercall_mode     __ro_after_init;
 
 static inline int __vmware_platform(void)
 {
 	uint32_t eax, ebx, ecx, edx;
-	VMWARE_PORT(GETVERSION, eax, ebx, ecx, edx);
+	VMWARE_CMD(GETVERSION, eax, ebx, ecx, edx);
 	return eax != (uint32_t)-1 && ebx == VMWARE_HYPERVISOR_MAGIC;
 }
 
```
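This hunk generalizes the old I/O-port backdoor into a three-way dispatch: `VMWARE_CMD()` picks `vmcall` (Intel VMX), `vmmcall` (AMD SVM), or the legacy `inl` on port 0x5658 based on `vmware_hypercall_mode`, which is selected once at boot. For illustration only, here is a minimal user-space sketch of the legacy port protocol (command 10, GETVERSION), mirroring `VMWARE_PORT()`; it assumes a VMware guest with the backdoor enabled and is not kernel code:

```c
/* Minimal user-space sketch of the legacy VMware backdoor call
 * (GETVERSION, command 10), mirroring VMWARE_PORT() above.
 * Assumption: running inside a VMware guest with the backdoor
 * enabled; elsewhere this returns junk or faults.
 */
#include <stdint.h>
#include <stdio.h>

#define VMWARE_HYPERVISOR_MAGIC 0x564D5868u
#define VMWARE_HYPERVISOR_PORT  0x5658
#define VMWARE_CMD_GETVERSION   10

int main(void)
{
	uint32_t eax, ebx, ecx, edx;

	asm volatile("inl (%%dx), %%eax"
		     : "=a"(eax), "=b"(ebx), "=c"(ecx), "=d"(edx)
		     : "a"(VMWARE_HYPERVISOR_MAGIC), "b"(~0u),
		       "c"(VMWARE_CMD_GETVERSION),
		       "d"(VMWARE_HYPERVISOR_PORT)
		     : "memory");

	if (ebx == VMWARE_HYPERVISOR_MAGIC)
		printf("VMware backdoor version: %u\n", eax);
	else
		puts("not a VMware guest (or backdoor unavailable)");
	return 0;
}
```

Note that `VMWARE_CMD()` falls back to the port method whenever `vmware_hypercall_mode` matches neither feature bit, so guests that do not advertise vmcall/vmmcall keep working unchanged.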
```diff
@@ -68,14 +123,24 @@
 
 #ifdef CONFIG_PARAVIRT
 static struct cyc2ns_data vmware_cyc2ns __ro_after_init;
-static int vmw_sched_clock __initdata = 1;
+static bool vmw_sched_clock __initdata = true;
+static DEFINE_PER_CPU_DECRYPTED(struct vmware_steal_time, vmw_steal_time) __aligned(64);
+static bool has_steal_clock;
+static bool steal_acc __initdata = true; /* steal time accounting */
 
 static __init int setup_vmw_sched_clock(char *s)
 {
-	vmw_sched_clock = 0;
+	vmw_sched_clock = false;
 	return 0;
 }
 early_param("no-vmw-sched-clock", setup_vmw_sched_clock);
+
+static __init int parse_no_stealacc(char *arg)
+{
+	steal_acc = false;
+	return 0;
+}
+early_param("no-steal-acc", parse_no_stealacc);
 
 static unsigned long long notrace vmware_sched_clock(void)
 {
```
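Both knobs added here are boot-time parameters: `no-vmw-sched-clock` keeps the native sched clock, and `no-steal-acc` (the same spelling KVM and Xen guests accept) still reports steal time but does not fold it into run-queue clock accounting. A hypothetical kernel command line disabling both:

```
... no-vmw-sched-clock no-steal-acc
```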
```diff
@@ -87,7 +152,7 @@
 	return ns;
 }
 
-static void __init vmware_sched_clock_setup(void)
+static void __init vmware_cyc2ns_setup(void)
 {
 	struct cyc2ns_data *d = &vmware_cyc2ns;
 	unsigned long long tsc_now = rdtsc();
```
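The rename reflects that this helper now only computes the cyc2ns constants; hooking sched_clock moves into `vmware_paravirt_ops_setup()` below, since the steal clock needs the same constants. As context, `mul_u64_u32_shr()` converts TSC cycles to nanoseconds as `(cyc * mul) >> shift`, and the offset recorded here anchors readings near zero at setup time. A standalone sketch with made-up constants for a hypothetical 2.5 GHz TSC:

```c
/* Illustrative only: how a (mul, shift) pair converts TSC cycles to
 * nanoseconds, the way mul_u64_u32_shr() is used with vmware_cyc2ns.
 * The tsc_khz and shift values below are made up for this example.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t mul_u64_u32_shr(uint64_t cyc, uint32_t mul, unsigned int shift)
{
	return (uint64_t)(((__uint128_t)cyc * mul) >> shift);
}

int main(void)
{
	uint32_t tsc_khz = 2500000;	/* hypothetical 2.5 GHz TSC */
	unsigned int shift = 22;
	/* ns = cyc * 1e6 / tsc_khz, so pick mul = (1e6 << shift) / tsc_khz */
	uint32_t mul = (uint32_t)(((uint64_t)1000000 << shift) / tsc_khz);

	/* One second worth of cycles converts to ~1e9 ns. */
	printf("%llu ns\n", (unsigned long long)
	       mul_u64_u32_shr((uint64_t)tsc_khz * 1000, mul, shift));
	return 0;
}
```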
```diff
@@ -97,17 +162,201 @@
 	d->cyc2ns_offset = mul_u64_u32_shr(tsc_now, d->cyc2ns_mul,
 					   d->cyc2ns_shift);
 
-	pv_time_ops.sched_clock = vmware_sched_clock;
-	pr_info("using sched offset of %llu ns\n", d->cyc2ns_offset);
+	pr_info("using clock offset of %llu ns\n", d->cyc2ns_offset);
 }
+
+static int vmware_cmd_stealclock(uint32_t arg1, uint32_t arg2)
+{
+	uint32_t result, info;
+
+	asm volatile (VMWARE_HYPERCALL :
+		"=a"(result),
+		"=c"(info) :
+		"a"(VMWARE_HYPERVISOR_MAGIC),
+		"b"(0),
+		"c"(VMWARE_CMD_STEALCLOCK),
+		"d"(0),
+		"S"(arg1),
+		"D"(arg2) :
+		"memory");
+	return result;
+}
+
+static bool stealclock_enable(phys_addr_t pa)
+{
+	return vmware_cmd_stealclock(upper_32_bits(pa),
+				     lower_32_bits(pa)) == STEALCLOCK_ENABLED;
+}
+
+static int __stealclock_disable(void)
+{
+	return vmware_cmd_stealclock(0, 1);
+}
+
+static void stealclock_disable(void)
+{
+	__stealclock_disable();
+}
```
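The steal-clock command packs the 64-bit physical address of the per-cpu buffer into the two 32-bit arguments to enable, while `(0, 1)` disables; the result is one of the three STEALCLOCK_* codes defined earlier. The next fragment reuses `__stealclock_disable()` as an availability probe, since a hypervisor without the command answers STEALCLOCK_NOT_AVAILABLE. A tiny sketch of the address split, assuming those semantics:

```c
/* Illustrative: splitting a 64-bit physical address across two 32-bit
 * hypercall arguments, as stealclock_enable() does with
 * upper_32_bits()/lower_32_bits().
 */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t pa = 0x00000123456789abull;	/* made-up address */
	uint32_t arg1 = (uint32_t)(pa >> 32);	/* upper_32_bits(pa) */
	uint32_t arg2 = (uint32_t)pa;		/* lower_32_bits(pa) */

	assert((((uint64_t)arg1 << 32) | arg2) == pa);
	return 0;
}
```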
```diff
+
+static bool vmware_is_stealclock_available(void)
+{
+	return __stealclock_disable() != STEALCLOCK_NOT_AVAILABLE;
+}
+
+/**
+ * vmware_steal_clock() - read the per-cpu steal clock
+ * @cpu: the cpu number whose steal clock we want to read
+ *
+ * The function reads the steal clock if we are on a 64-bit system, otherwise
+ * reads it in parts, checking that the high part didn't change in the
+ * meantime.
+ *
+ * Return:
+ *	The steal clock reading in ns.
+ */
+static uint64_t vmware_steal_clock(int cpu)
+{
+	struct vmware_steal_time *steal = &per_cpu(vmw_steal_time, cpu);
+	uint64_t clock;
+
+	if (IS_ENABLED(CONFIG_64BIT))
+		clock = READ_ONCE(steal->clock);
+	else {
+		uint32_t initial_high, low, high;
+
+		do {
+			initial_high = READ_ONCE(steal->clock_high);
+			/* Do not reorder initial_high and high readings */
+			virt_rmb();
+			low = READ_ONCE(steal->clock_low);
+			/* Keep low reading in between */
+			virt_rmb();
+			high = READ_ONCE(steal->clock_high);
+		} while (initial_high != high);
+
+		clock = ((uint64_t)high << 32) | low;
+	}
+
+	return mul_u64_u32_shr(clock, vmware_cyc2ns.cyc2ns_mul,
+			       vmware_cyc2ns.cyc2ns_shift);
+}
```
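On 64-bit the counter is read in one aligned load; on 32-bit the high/low/high loop avoids returning a torn value while the hypervisor keeps updating it. A user-space analogue of that loop, with C11 acquire loads standing in for `READ_ONCE()` plus `virt_rmb()` (a sketch under that assumption, not the kernel primitives themselves):

```c
/* Illustrative analogue of the 32-bit read loop: re-read the high half
 * until it is stable, so the combined 64-bit value is never torn.
 * Assumes the writer only wraps `low` together with bumping `high`,
 * which holds for a monotonically increasing counter.
 */
#include <stdatomic.h>
#include <stdint.h>

struct split_counter {
	_Atomic uint32_t low;
	_Atomic uint32_t high;
};

static uint64_t read_split(struct split_counter *c)
{
	uint32_t initial_high, low, high;

	do {
		initial_high = atomic_load_explicit(&c->high, memory_order_acquire);
		low  = atomic_load_explicit(&c->low,  memory_order_acquire);
		high = atomic_load_explicit(&c->high, memory_order_acquire);
	} while (initial_high != high);

	return ((uint64_t)high << 32) | low;
}
```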
```diff
+
+static void vmware_register_steal_time(void)
+{
+	int cpu = smp_processor_id();
+	struct vmware_steal_time *st = &per_cpu(vmw_steal_time, cpu);
+
+	if (!has_steal_clock)
+		return;
+
+	if (!stealclock_enable(slow_virt_to_phys(st))) {
+		has_steal_clock = false;
+		return;
+	}
+
+	pr_info("vmware-stealtime: cpu %d, pa %llx\n",
+		cpu, (unsigned long long) slow_virt_to_phys(st));
+}
+
+static void vmware_disable_steal_time(void)
+{
+	if (!has_steal_clock)
+		return;
+
+	stealclock_disable();
+}
+
+static void vmware_guest_cpu_init(void)
+{
+	if (has_steal_clock)
+		vmware_register_steal_time();
+}
+
+static void vmware_pv_guest_cpu_reboot(void *unused)
+{
+	vmware_disable_steal_time();
+}
+
+static int vmware_pv_reboot_notify(struct notifier_block *nb,
+				   unsigned long code, void *unused)
+{
+	if (code == SYS_RESTART)
+		on_each_cpu(vmware_pv_guest_cpu_reboot, NULL, 1);
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block vmware_pv_reboot_nb = {
+	.notifier_call = vmware_pv_reboot_notify,
+};
+
+#ifdef CONFIG_SMP
+static void __init vmware_smp_prepare_boot_cpu(void)
+{
+	vmware_guest_cpu_init();
+	native_smp_prepare_boot_cpu();
+}
+
+static int vmware_cpu_online(unsigned int cpu)
+{
+	local_irq_disable();
+	vmware_guest_cpu_init();
+	local_irq_enable();
+	return 0;
+}
+
+static int vmware_cpu_down_prepare(unsigned int cpu)
+{
+	local_irq_disable();
+	vmware_disable_steal_time();
+	local_irq_enable();
+	return 0;
+}
+#endif
+
+static __init int activate_jump_labels(void)
+{
+	if (has_steal_clock) {
+		static_key_slow_inc(&paravirt_steal_enabled);
+		if (steal_acc)
+			static_key_slow_inc(&paravirt_steal_rq_enabled);
+	}
+
+	return 0;
+}
+arch_initcall(activate_jump_labels);
```
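The two static keys are the generic paravirt steal-time switches: with `paravirt_steal_enabled` set, the scheduler samples `pv_ops.time.steal_clock` each tick and charges only the delta since the previous sample, and `paravirt_steal_rq_enabled` additionally removes that delta from run-queue time. A minimal user-space model of the sample-and-charge-the-delta pattern (illustrative, not the scheduler code):

```c
/* Illustrative: the delta-accounting pattern used for steal time.
 * A monotonically increasing "stolen ns" counter is read at each tick
 * and only the increment since the last read is accounted.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t prev_steal;	/* per-cpu in the real thing */

static uint64_t account_steal_delta(uint64_t now_steal)
{
	uint64_t delta = now_steal - prev_steal;

	prev_steal = now_steal;
	return delta;		/* charged as stolen this tick */
}

int main(void)
{
	printf("%llu\n", (unsigned long long)account_steal_delta(1000));
	printf("%llu\n", (unsigned long long)account_steal_delta(1800)); /* 800 */
	return 0;
}
```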
```diff
 
 static void __init vmware_paravirt_ops_setup(void)
 {
 	pv_info.name = "VMware hypervisor";
-	pv_cpu_ops.io_delay = paravirt_nop;
+	pv_ops.cpu.io_delay = paravirt_nop;
 
-	if (vmware_tsc_khz && vmw_sched_clock)
-		vmware_sched_clock_setup();
+	if (vmware_tsc_khz == 0)
+		return;
+
+	vmware_cyc2ns_setup();
+
+	if (vmw_sched_clock)
+		pv_ops.time.sched_clock = vmware_sched_clock;
+
+	if (vmware_is_stealclock_available()) {
+		has_steal_clock = true;
+		pv_ops.time.steal_clock = vmware_steal_clock;
+
+		/* We use reboot notifier only to disable steal clock */
+		register_reboot_notifier(&vmware_pv_reboot_nb);
+
+#ifdef CONFIG_SMP
+		smp_ops.smp_prepare_boot_cpu =
+			vmware_smp_prepare_boot_cpu;
+		if (cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
+					      "x86/vmware:online",
+					      vmware_cpu_online,
+					      vmware_cpu_down_prepare) < 0)
+			pr_err("vmware_guest: Failed to install cpu hotplug callbacks\n");
+#else
+		vmware_guest_cpu_init();
+#endif
+	}
 }
 #else
 #define vmware_paravirt_ops_setup() do {} while (0)
```
```diff
@@ -129,6 +378,10 @@
 {
 	setup_force_cpu_cap(X86_FEATURE_CONSTANT_TSC);
 	setup_force_cpu_cap(X86_FEATURE_TSC_RELIABLE);
+	if (vmware_hypercall_mode == CPUID_VMWARE_FEATURES_ECX_VMCALL)
+		setup_force_cpu_cap(X86_FEATURE_VMCALL);
+	else if (vmware_hypercall_mode == CPUID_VMWARE_FEATURES_ECX_VMMCALL)
+		setup_force_cpu_cap(X86_FEATURE_VMW_VMMCALL);
 }
 
 static void __init vmware_platform_setup(void)
@@ -136,7 +389,7 @@
 	uint32_t eax, ebx, ecx, edx;
 	uint64_t lpj, tsc_khz;
 
-	VMWARE_PORT(GETHZ, eax, ebx, ecx, edx);
+	VMWARE_CMD(GETHZ, eax, ebx, ecx, edx);
 
 	if (ebx != UINT_MAX) {
 		lpj = tsc_khz = eax | (((uint64_t)ebx) << 32);
```
```diff
@@ -157,7 +410,7 @@
 
 #ifdef CONFIG_X86_LOCAL_APIC
 	/* Skip lapic calibration since we know the bus frequency. */
-	lapic_timer_frequency = ecx / HZ;
+	lapic_timer_period = ecx / HZ;
 	pr_info("Host bus clock speed read from hypervisor : %u Hz\n",
 		ecx);
 #endif
```
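Only the variable name changes here (`lapic_timer_frequency` became `lapic_timer_period` upstream). GETHZ returns the host bus clock in Hz in ECX, so dividing by HZ yields APIC timer ticks per jiffy: for a hypothetical 66,000,000 Hz bus clock and HZ=250, `lapic_timer_period` becomes 264,000.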
```diff
@@ -174,10 +427,21 @@
 	vmware_set_capabilities();
 }
 
+static u8 __init vmware_select_hypercall(void)
+{
+	int eax, ebx, ecx, edx;
+
+	cpuid(CPUID_VMWARE_FEATURES_LEAF, &eax, &ebx, &ecx, &edx);
+	return (ecx & (CPUID_VMWARE_FEATURES_ECX_VMMCALL |
+		       CPUID_VMWARE_FEATURES_ECX_VMCALL));
+}
+
 /*
  * While checking the dmi string information, just checking the product
  * serial key should be enough, as this will always have a VMware
  * specific string when running under VMware hypervisor.
+ * If !boot_cpu_has(X86_FEATURE_HYPERVISOR), vmware_hypercall_mode
+ * intentionally defaults to 0.
  */
 static uint32_t __init vmware_platform(void)
 {
```
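`vmware_select_hypercall()` simply returns the masked ECX bits of the features leaf, so the mode stays 0 when the leaf is absent or advertises neither instruction, and `VMWARE_CMD()` then keeps using the port backdoor. A hypothetical user-space probe of the same leaf (assumes a VMware guest whose maximum hypervisor leaf covers 0x40000010; otherwise the CPUID result is meaningless):

```c
/* Hypothetical user-space probe of CPUID 0x40000010 ECX, mirroring
 * vmware_select_hypercall(). Only meaningful in a VMware guest whose
 * CPUID_VMWARE_INFO_LEAF EAX reports at least 0x40000010.
 */
#include <stdint.h>
#include <stdio.h>

static void cpuid4(uint32_t leaf, uint32_t *a, uint32_t *b,
		   uint32_t *c, uint32_t *d)
{
	asm volatile("cpuid"
		     : "=a"(*a), "=b"(*b), "=c"(*c), "=d"(*d)
		     : "a"(leaf), "c"(0));
}

int main(void)
{
	uint32_t a, b, c, d;

	cpuid4(0x40000010, &a, &b, &c, &d);
	if (c & (1u << 1))		/* ..._ECX_VMCALL */
		puts("hypercall: vmcall (Intel VMX)");
	else if (c & (1u << 0))		/* ..._ECX_VMMCALL */
		puts("hypercall: vmmcall (AMD SVM)");
	else
		puts("hypercall: legacy I/O port");
	return 0;
}
```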
```diff
@@ -187,8 +451,16 @@
 
 		cpuid(CPUID_VMWARE_INFO_LEAF, &eax, &hyper_vendor_id[0],
 		      &hyper_vendor_id[1], &hyper_vendor_id[2]);
-		if (!memcmp(hyper_vendor_id, "VMwareVMware", 12))
+		if (!memcmp(hyper_vendor_id, "VMwareVMware", 12)) {
+			if (eax >= CPUID_VMWARE_FEATURES_LEAF)
+				vmware_hypercall_mode =
+					vmware_select_hypercall();
+
+			pr_info("hypercall mode: 0x%02x\n",
+				(unsigned int) vmware_hypercall_mode);
+
 			return CPUID_VMWARE_INFO_LEAF;
+		}
 	} else if (dmi_available && dmi_name_in_serial("VMware") &&
 		   __vmware_platform())
 		return 1;
```
```diff
@@ -200,15 +472,54 @@
 static bool __init vmware_legacy_x2apic_available(void)
 {
 	uint32_t eax, ebx, ecx, edx;
-	VMWARE_PORT(GETVCPU_INFO, eax, ebx, ecx, edx);
-	return (eax & (1 << VMWARE_PORT_CMD_VCPU_RESERVED)) == 0 &&
-	       (eax & (1 << VMWARE_PORT_CMD_LEGACY_X2APIC)) != 0;
+	VMWARE_CMD(GETVCPU_INFO, eax, ebx, ecx, edx);
+	return (eax & (1 << VMWARE_CMD_VCPU_RESERVED)) == 0 &&
+	       (eax & (1 << VMWARE_CMD_LEGACY_X2APIC)) != 0;
 }
 
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+static void vmware_sev_es_hcall_prepare(struct ghcb *ghcb,
+					struct pt_regs *regs)
+{
+	/* Copy VMWARE specific Hypercall parameters to the GHCB */
+	ghcb_set_rip(ghcb, regs->ip);
+	ghcb_set_rbx(ghcb, regs->bx);
+	ghcb_set_rcx(ghcb, regs->cx);
+	ghcb_set_rdx(ghcb, regs->dx);
+	ghcb_set_rsi(ghcb, regs->si);
+	ghcb_set_rdi(ghcb, regs->di);
+	ghcb_set_rbp(ghcb, regs->bp);
+}
+
+static bool vmware_sev_es_hcall_finish(struct ghcb *ghcb, struct pt_regs *regs)
+{
+	if (!(ghcb_rbx_is_valid(ghcb) &&
+	      ghcb_rcx_is_valid(ghcb) &&
+	      ghcb_rdx_is_valid(ghcb) &&
+	      ghcb_rsi_is_valid(ghcb) &&
+	      ghcb_rdi_is_valid(ghcb) &&
+	      ghcb_rbp_is_valid(ghcb)))
+		return false;
+
+	regs->bx = ghcb->save.rbx;
+	regs->cx = ghcb->save.rcx;
+	regs->dx = ghcb->save.rdx;
+	regs->si = ghcb->save.rsi;
+	regs->di = ghcb->save.rdi;
+	regs->bp = ghcb->save.rbp;
+
+	return true;
+}
+#endif
+
 const __initconst struct hypervisor_x86 x86_hyper_vmware = {
-	.name			= "VMware",
-	.detect			= vmware_platform,
-	.type			= X86_HYPER_VMWARE,
-	.init.init_platform	= vmware_platform_setup,
-	.init.x2apic_available	= vmware_legacy_x2apic_available,
+	.name				= "VMware",
+	.detect				= vmware_platform,
+	.type				= X86_HYPER_VMWARE,
+	.init.init_platform		= vmware_platform_setup,
+	.init.x2apic_available		= vmware_legacy_x2apic_available,
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+	.runtime.sev_es_hcall_prepare	= vmware_sev_es_hcall_prepare,
+	.runtime.sev_es_hcall_finish	= vmware_sev_es_hcall_finish,
+#endif
 };
```