2024-12-19 9370bb92b2d16684ee45cf24e879c93c509162da
kernel/arch/x86/kernel/kvm.c
@@ -1,27 +1,17 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * KVM paravirt_ops implementation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
  *
  * Copyright (C) 2007, Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
  * Copyright IBM Corporation, 2007
  *   Authors: Anthony Liguori <aliguori@us.ibm.com>
  */
 
+#define pr_fmt(fmt) "kvm-guest: " fmt
+
 #include <linux/context_tracking.h>
 #include <linux/init.h>
+#include <linux/irq.h>
 #include <linux/kernel.h>
 #include <linux/kvm_para.h>
 #include <linux/cpu.h>
@@ -34,9 +24,9 @@
 #include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/kprobes.h>
-#include <linux/debugfs.h>
 #include <linux/nmi.h>
 #include <linux/swait.h>
+#include <linux/syscore_ops.h>
 #include <asm/timer.h>
 #include <asm/cpu.h>
 #include <asm/traps.h>
@@ -46,6 +36,12 @@
 #include <asm/apicdef.h>
 #include <asm/hypervisor.h>
 #include <asm/tlb.h>
+#include <asm/cpuidle_haltpoll.h>
+#include <asm/ptrace.h>
+#include <asm/reboot.h>
+#include <asm/svm.h>
+
+DEFINE_STATIC_KEY_FALSE(kvm_async_pf_enabled);
 
 static int kvmapf = 1;
 
@@ -67,9 +63,10 @@
 early_param("no-steal-acc", parse_no_stealacc);
 
 static DEFINE_PER_CPU_DECRYPTED(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
-static DEFINE_PER_CPU_DECRYPTED(struct kvm_steal_time, steal_time) __aligned(64);
+DEFINE_PER_CPU_DECRYPTED(struct kvm_steal_time, steal_time) __aligned(64) __visible;
 static int has_steal_clock = 0;
 
+static int has_guest_poll = 0;
 /*
  * No need for any "IO delay" on KVM
  */
@@ -85,7 +82,6 @@
 	struct swait_queue_head wq;
 	u32 token;
 	int cpu;
-	bool halted;
 };
 
 static struct kvm_task_sleep_head {
@@ -108,77 +104,64 @@
 	return NULL;
 }
 
-/*
- * @interrupt_kernel: Is this called from a routine which interrupts the kernel
- * (other than user space)?
- */
-void kvm_async_pf_task_wait(u32 token, int interrupt_kernel)
+static bool kvm_async_pf_queue_task(u32 token, struct kvm_task_sleep_node *n)
 {
 	u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
 	struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
-	struct kvm_task_sleep_node n, *e;
-	DECLARE_SWAITQUEUE(wait);
-
-	rcu_irq_enter();
+	struct kvm_task_sleep_node *e;
 
 	raw_spin_lock(&b->lock);
 	e = _find_apf_task(b, token);
 	if (e) {
 		/* dummy entry exist -> wake up was delivered ahead of PF */
 		hlist_del(&e->link);
-		kfree(e);
 		raw_spin_unlock(&b->lock);
-
-		rcu_irq_exit();
-		return;
+		kfree(e);
+		return false;
 	}
 
-	n.token = token;
-	n.cpu = smp_processor_id();
-	n.halted = is_idle_task(current) ||
-		   (IS_ENABLED(CONFIG_PREEMPT_COUNT)
-		    ? preempt_count() > 1 || rcu_preempt_depth()
-		    : interrupt_kernel);
-	init_swait_queue_head(&n.wq);
-	hlist_add_head(&n.link, &b->list);
+	n->token = token;
+	n->cpu = smp_processor_id();
+	init_swait_queue_head(&n->wq);
+	hlist_add_head(&n->link, &b->list);
 	raw_spin_unlock(&b->lock);
+	return true;
+}
+
+/*
+ * kvm_async_pf_task_wait_schedule - Wait for pagefault to be handled
+ * @token:	Token to identify the sleep node entry
+ *
+ * Invoked from the async pagefault handling code or from the VM exit page
+ * fault handler. In both cases RCU is watching.
+ */
+void kvm_async_pf_task_wait_schedule(u32 token)
+{
+	struct kvm_task_sleep_node n;
+	DECLARE_SWAITQUEUE(wait);
+
+	lockdep_assert_irqs_disabled();
+
+	if (!kvm_async_pf_queue_task(token, &n))
+		return;
 
 	for (;;) {
-		if (!n.halted)
-			prepare_to_swait_exclusive(&n.wq, &wait, TASK_UNINTERRUPTIBLE);
+		prepare_to_swait_exclusive(&n.wq, &wait, TASK_UNINTERRUPTIBLE);
 		if (hlist_unhashed(&n.link))
 			break;
 
-		rcu_irq_exit();
-
-		if (!n.halted) {
-			local_irq_enable();
-			schedule();
-			local_irq_disable();
-		} else {
-			/*
-			 * We cannot reschedule. So halt.
-			 */
-			native_safe_halt();
-			local_irq_disable();
-		}
-
-		rcu_irq_enter();
+		local_irq_enable();
+		schedule();
+		local_irq_disable();
 	}
-	if (!n.halted)
-		finish_swait(&n.wq, &wait);
-
-	rcu_irq_exit();
-	return;
+	finish_swait(&n.wq, &wait);
 }
-EXPORT_SYMBOL_GPL(kvm_async_pf_task_wait);
+EXPORT_SYMBOL_GPL(kvm_async_pf_task_wait_schedule);
 
 static void apf_task_wake_one(struct kvm_task_sleep_node *n)
 {
 	hlist_del_init(&n->link);
-	if (n->halted)
-		smp_send_reschedule(n->cpu);
-	else if (swq_has_sleeper(&n->wq))
+	if (swq_has_sleeper(&n->wq))
 		swake_up_one(&n->wq);
 }
 
@@ -187,12 +170,13 @@
 	int i;
 
 	for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++) {
-		struct hlist_node *p, *next;
 		struct kvm_task_sleep_head *b = &async_pf_sleepers[i];
+		struct kvm_task_sleep_node *n;
+		struct hlist_node *p, *next;
+
 		raw_spin_lock(&b->lock);
 		hlist_for_each_safe(p, next, &b->list) {
-			struct kvm_task_sleep_node *n =
-				hlist_entry(p, typeof(*n), link);
+			n = hlist_entry(p, typeof(*n), link);
 			if (n->cpu == smp_processor_id())
 				apf_task_wake_one(n);
 		}
@@ -204,7 +188,7 @@
 {
 	u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
 	struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
-	struct kvm_task_sleep_node *n;
+	struct kvm_task_sleep_node *n, *dummy = NULL;
 
 	if (token == ~0) {
 		apf_task_wake_all();
@@ -216,74 +200,115 @@
 	n = _find_apf_task(b, token);
 	if (!n) {
 		/*
-		 * async PF was not yet handled.
-		 * Add dummy entry for the token.
+		 * Async #PF not yet handled, add a dummy entry for the token.
+		 * Allocating the token must be down outside of the raw lock
+		 * as the allocator is preemptible on PREEMPT_RT kernels.
 		 */
-		n = kzalloc(sizeof(*n), GFP_ATOMIC);
-		if (!n) {
-			/*
-			 * Allocation failed! Busy wait while other cpu
-			 * handles async PF.
-			 */
+		if (!dummy) {
 			raw_spin_unlock(&b->lock);
-			cpu_relax();
+			dummy = kzalloc(sizeof(*dummy), GFP_ATOMIC);
+
+			/*
+			 * Continue looping on allocation failure, eventually
+			 * the async #PF will be handled and allocating a new
+			 * node will be unnecessary.
+			 */
+			if (!dummy)
+				cpu_relax();
+
+			/*
+			 * Recheck for async #PF completion before enqueueing
+			 * the dummy token to avoid duplicate list entries.
+			 */
 			goto again;
 		}
-		n->token = token;
-		n->cpu = smp_processor_id();
-		init_swait_queue_head(&n->wq);
-		hlist_add_head(&n->link, &b->list);
-	} else
+		dummy->token = token;
+		dummy->cpu = smp_processor_id();
+		init_swait_queue_head(&dummy->wq);
+		hlist_add_head(&dummy->link, &b->list);
+		dummy = NULL;
+	} else {
 		apf_task_wake_one(n);
+	}
 	raw_spin_unlock(&b->lock);
-	return;
+
+	/* A dummy token might be allocated and ultimately not used. */
+	if (dummy)
+		kfree(dummy);
 }
 EXPORT_SYMBOL_GPL(kvm_async_pf_task_wake);
 
-u32 kvm_read_and_reset_pf_reason(void)
+noinstr u32 kvm_read_and_reset_apf_flags(void)
 {
-	u32 reason = 0;
+	u32 flags = 0;
 
 	if (__this_cpu_read(apf_reason.enabled)) {
-		reason = __this_cpu_read(apf_reason.reason);
-		__this_cpu_write(apf_reason.reason, 0);
+		flags = __this_cpu_read(apf_reason.flags);
+		__this_cpu_write(apf_reason.flags, 0);
 	}
 
-	return reason;
+	return flags;
 }
-EXPORT_SYMBOL_GPL(kvm_read_and_reset_pf_reason);
-NOKPROBE_SYMBOL(kvm_read_and_reset_pf_reason);
+EXPORT_SYMBOL_GPL(kvm_read_and_reset_apf_flags);
 
-dotraplinkage void
-do_async_page_fault(struct pt_regs *regs, unsigned long error_code)
+noinstr bool __kvm_handle_async_pf(struct pt_regs *regs, u32 token)
 {
-	enum ctx_state prev_state;
+	u32 flags = kvm_read_and_reset_apf_flags();
+	irqentry_state_t state;
 
-	switch (kvm_read_and_reset_pf_reason()) {
-	default:
-		do_page_fault(regs, error_code);
-		break;
-	case KVM_PV_REASON_PAGE_NOT_PRESENT:
-		/* page is swapped out by the host. */
-		prev_state = exception_enter();
-		kvm_async_pf_task_wait((u32)read_cr2(), !user_mode(regs));
-		exception_exit(prev_state);
-		break;
-	case KVM_PV_REASON_PAGE_READY:
-		rcu_irq_enter();
-		kvm_async_pf_task_wake((u32)read_cr2());
-		rcu_irq_exit();
-		break;
+	if (!flags)
+		return false;
+
+	state = irqentry_enter(regs);
+	instrumentation_begin();
+
+	/*
+	 * If the host managed to inject an async #PF into an interrupt
+	 * disabled region, then die hard as this is not going to end well
+	 * and the host side is seriously broken.
+	 */
+	if (unlikely(!(regs->flags & X86_EFLAGS_IF)))
+		panic("Host injected async #PF in interrupt disabled region\n");
+
+	if (flags & KVM_PV_REASON_PAGE_NOT_PRESENT) {
+		if (unlikely(!(user_mode(regs))))
+			panic("Host injected async #PF in kernel mode\n");
+		/* Page is swapped out by the host. */
+		kvm_async_pf_task_wait_schedule(token);
+	} else {
+		WARN_ONCE(1, "Unexpected async PF flags: %x\n", flags);
 	}
+
+	instrumentation_end();
+	irqentry_exit(regs, state);
+	return true;
 }
-NOKPROBE_SYMBOL(do_async_page_fault);
+
+DEFINE_IDTENTRY_SYSVEC(sysvec_kvm_asyncpf_interrupt)
+{
+	struct pt_regs *old_regs = set_irq_regs(regs);
+	u32 token;
+
+	ack_APIC_irq();
+
+	inc_irq_stat(irq_hv_callback_count);
+
+	if (__this_cpu_read(apf_reason.enabled)) {
+		token = __this_cpu_read(apf_reason.token);
+		kvm_async_pf_task_wake(token);
+		__this_cpu_write(apf_reason.token, 0);
+		wrmsrl(MSR_KVM_ASYNC_PF_ACK, 1);
+	}
+
+	set_irq_regs(old_regs);
+}
 
 static void __init paravirt_ops_setup(void)
 {
 	pv_info.name = "KVM";
 
 	if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY))
-		pv_cpu_ops.io_delay = kvm_io_delay;
+		pv_ops.cpu.io_delay = kvm_io_delay;
 
 #ifdef CONFIG_X86_IO_APIC
 	no_timer_check = 1;
@@ -299,8 +324,8 @@
 		return;
 
 	wrmsrl(MSR_KVM_STEAL_TIME, (slow_virt_to_phys(st) | KVM_MSR_ENABLED));
-	pr_info("kvm-stealtime: cpu %d, msr %llx\n",
-		cpu, (unsigned long long) slow_virt_to_phys(st));
+	pr_info("stealtime: cpu %d, msr %llx\n", cpu,
+		(unsigned long long) slow_virt_to_phys(st));
 }
 
 static DEFINE_PER_CPU_DECRYPTED(unsigned long, kvm_apic_eoi) = KVM_PV_EOI_DISABLED;
@@ -321,28 +346,27 @@
 
 static void kvm_guest_cpu_init(void)
 {
-	if (!kvm_para_available())
-		return;
-
-	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF) && kvmapf) {
+	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF_INT) && kvmapf) {
 		u64 pa = slow_virt_to_phys(this_cpu_ptr(&apf_reason));
 
-#ifdef CONFIG_PREEMPT
-		pa |= KVM_ASYNC_PF_SEND_ALWAYS;
-#endif
-		pa |= KVM_ASYNC_PF_ENABLED;
+		WARN_ON_ONCE(!static_branch_likely(&kvm_async_pf_enabled));
+
+		pa = slow_virt_to_phys(this_cpu_ptr(&apf_reason));
+		pa |= KVM_ASYNC_PF_ENABLED | KVM_ASYNC_PF_DELIVERY_AS_INT;
 
 		if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF_VMEXIT))
 			pa |= KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT;
 
+		wrmsrl(MSR_KVM_ASYNC_PF_INT, HYPERVISOR_CALLBACK_VECTOR);
+
 		wrmsrl(MSR_KVM_ASYNC_PF_EN, pa);
 		__this_cpu_write(apf_reason.enabled, 1);
-		printk(KERN_INFO"KVM setup async PF for cpu %d\n",
-		       smp_processor_id());
+		pr_info("KVM setup async PF for cpu %d\n", smp_processor_id());
 	}
 
 	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) {
 		unsigned long pa;
+
 		/* Size alignment is implied but just to make it explicit. */
 		BUILD_BUG_ON(__alignof__(kvm_apic_eoi) < 4);
 		__this_cpu_write(kvm_apic_eoi, 0);
@@ -363,8 +387,15 @@
 	wrmsrl(MSR_KVM_ASYNC_PF_EN, 0);
 	__this_cpu_write(apf_reason.enabled, 0);
 
-	printk(KERN_INFO"Unregister pv shared memory for cpu %d\n",
-	       smp_processor_id());
+	pr_info("Unregister pv shared memory for cpu %d\n", smp_processor_id());
+}
+
+static void kvm_disable_steal_time(void)
+{
+	if (!has_steal_clock)
+		return;
+
+	wrmsr(MSR_KVM_STEAL_TIME, 0, 0);
 }
 
 static void kvm_pv_guest_cpu_reboot(void *unused)
@@ -409,14 +440,6 @@
 	return steal;
 }
 
-void kvm_disable_steal_time(void)
-{
-	if (!has_steal_clock)
-		return;
-
-	wrmsr(MSR_KVM_STEAL_TIME, 0, 0);
-}
-
 static inline void __set_percpu_decrypted(void *ptr, unsigned long size)
 {
 	early_set_memory_decrypted((unsigned long) ptr, size);
@@ -444,7 +467,50 @@
 	}
 }
 
+static bool pv_tlb_flush_supported(void)
+{
+	return (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
+		!kvm_para_has_hint(KVM_HINTS_REALTIME) &&
+		kvm_para_has_feature(KVM_FEATURE_STEAL_TIME));
+}
+
+static DEFINE_PER_CPU(cpumask_var_t, __pv_cpu_mask);
+
+static void kvm_guest_cpu_offline(bool shutdown)
+{
+	kvm_disable_steal_time();
+	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
+		wrmsrl(MSR_KVM_PV_EOI_EN, 0);
+	kvm_pv_disable_apf();
+	if (!shutdown)
+		apf_task_wake_all();
+	kvmclock_disable();
+}
+
+static int kvm_cpu_online(unsigned int cpu)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	kvm_guest_cpu_init();
+	local_irq_restore(flags);
+	return 0;
+}
+
 #ifdef CONFIG_SMP
+
+static bool pv_ipi_supported(void)
+{
+	return kvm_para_has_feature(KVM_FEATURE_PV_SEND_IPI);
+}
+
+static bool pv_sched_yield_supported(void)
+{
+	return (kvm_para_has_feature(KVM_FEATURE_PV_SCHED_YIELD) &&
+		!kvm_para_has_hint(KVM_HINTS_REALTIME) &&
+		kvm_para_has_feature(KVM_FEATURE_STEAL_TIME));
+}
+
 #define KVM_IPI_CLUSTER_SIZE (2 * BITS_PER_LONG)
 
 static void __send_ipi_mask(const struct cpumask *mask, int vector)
@@ -480,12 +546,13 @@
 		} else if (apic_id < min && max - apic_id < KVM_IPI_CLUSTER_SIZE) {
 			ipi_bitmap <<= min - apic_id;
 			min = apic_id;
-		} else if (apic_id < min + KVM_IPI_CLUSTER_SIZE) {
+		} else if (apic_id > min && apic_id < min + KVM_IPI_CLUSTER_SIZE) {
 			max = apic_id < max ? max : apic_id;
 		} else {
 			ret = kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
 				(unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr);
-			WARN_ONCE(ret < 0, "KVM: failed to send PV IPI: %ld", ret);
+			WARN_ONCE(ret < 0, "kvm-guest: failed to send PV IPI: %ld",
+				  ret);
 			min = max = apic_id;
 			ipi_bitmap = 0;
 		}
@@ -495,7 +562,8 @@
 	if (ipi_bitmap) {
 		ret = kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
 			(unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr);
-		WARN_ONCE(ret < 0, "KVM: failed to send PV IPI: %ld", ret);
+		WARN_ONCE(ret < 0, "kvm-guest: failed to send PV IPI: %ld",
+			  ret);
 	}
 
 	local_irq_restore(flags);
@@ -509,23 +577,13 @@
 static void kvm_send_ipi_mask_allbutself(const struct cpumask *mask, int vector)
 {
 	unsigned int this_cpu = smp_processor_id();
-	struct cpumask new_mask;
+	struct cpumask *new_mask = this_cpu_cpumask_var_ptr(__pv_cpu_mask);
 	const struct cpumask *local_mask;
 
-	cpumask_copy(&new_mask, mask);
-	cpumask_clear_cpu(this_cpu, &new_mask);
-	local_mask = &new_mask;
+	cpumask_copy(new_mask, mask);
+	cpumask_clear_cpu(this_cpu, new_mask);
+	local_mask = new_mask;
 	__send_ipi_mask(local_mask, vector);
-}
-
-static void kvm_send_ipi_allbutself(int vector)
-{
-	kvm_send_ipi_mask_allbutself(cpu_online_mask, vector);
-}
-
-static void kvm_send_ipi_all(int vector)
-{
-	__send_ipi_mask(cpu_online_mask, vector);
 }
 
 /*
@@ -535,16 +593,22 @@
 {
 	apic->send_IPI_mask = kvm_send_ipi_mask;
 	apic->send_IPI_mask_allbutself = kvm_send_ipi_mask_allbutself;
-	apic->send_IPI_allbutself = kvm_send_ipi_allbutself;
-	apic->send_IPI_all = kvm_send_ipi_all;
-	pr_info("KVM setup pv IPIs\n");
+	pr_info("setup PV IPIs\n");
 }
 
-static void __init kvm_smp_prepare_cpus(unsigned int max_cpus)
+static void kvm_smp_send_call_func_ipi(const struct cpumask *mask)
 {
-	native_smp_prepare_cpus(max_cpus);
-	if (kvm_para_has_hint(KVM_HINTS_REALTIME))
-		static_branch_disable(&virt_spin_lock_key);
+	int cpu;
+
+	native_send_call_func_ipi(mask);
+
+	/* Make sure other vCPUs get a chance to run if they need to. */
+	for_each_cpu(cpu, mask) {
+		if (vcpu_is_preempted(cpu)) {
+			kvm_hypercall1(KVM_HC_SCHED_YIELD, per_cpu(x86_cpu_to_apicid, cpu));
+			break;
+		}
+	}
 }
 
 static void __init kvm_smp_prepare_boot_cpu(void)
@@ -560,38 +624,60 @@
 	kvm_spinlock_init();
 }
 
-static void kvm_guest_cpu_offline(void)
-{
-	kvm_disable_steal_time();
-	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
-		wrmsrl(MSR_KVM_PV_EOI_EN, 0);
-	kvm_pv_disable_apf();
-	apf_task_wake_all();
-}
-
-static int kvm_cpu_online(unsigned int cpu)
-{
-	local_irq_disable();
-	kvm_guest_cpu_init();
-	local_irq_enable();
-	return 0;
-}
-
 static int kvm_cpu_down_prepare(unsigned int cpu)
 {
-	local_irq_disable();
-	kvm_guest_cpu_offline();
-	local_irq_enable();
+	unsigned long flags;
+
+	local_irq_save(flags);
+	kvm_guest_cpu_offline(false);
+	local_irq_restore(flags);
 	return 0;
 }
+
 #endif
 
-static void __init kvm_apf_trap_init(void)
+static int kvm_suspend(void)
 {
-	update_intr_gate(X86_TRAP_PF, async_page_fault);
+	u64 val = 0;
+
+	kvm_guest_cpu_offline(false);
+
+#ifdef CONFIG_ARCH_CPUIDLE_HALTPOLL
+	if (kvm_para_has_feature(KVM_FEATURE_POLL_CONTROL))
+		rdmsrl(MSR_KVM_POLL_CONTROL, val);
+	has_guest_poll = !(val & 1);
+#endif
+	return 0;
 }
 
-static DEFINE_PER_CPU(cpumask_var_t, __pv_tlb_mask);
+static void kvm_resume(void)
+{
+	kvm_cpu_online(raw_smp_processor_id());
+
+#ifdef CONFIG_ARCH_CPUIDLE_HALTPOLL
+	if (kvm_para_has_feature(KVM_FEATURE_POLL_CONTROL) && has_guest_poll)
+		wrmsrl(MSR_KVM_POLL_CONTROL, 0);
+#endif
+}
+
+static struct syscore_ops kvm_syscore_ops = {
+	.suspend	= kvm_suspend,
+	.resume		= kvm_resume,
+};
+
+/*
+ * After a PV feature is registered, the host will keep writing to the
+ * registered memory location. If the guest happens to shutdown, this memory
+ * won't be valid. In cases like kexec, in which you install a new kernel, this
+ * means a random memory location will be kept being written.
+ */
+#ifdef CONFIG_KEXEC_CORE
+static void kvm_crash_shutdown(struct pt_regs *regs)
+{
+	kvm_guest_cpu_offline(true);
+	native_machine_crash_shutdown(regs);
+}
+#endif
 
 static void kvm_flush_tlb_others(const struct cpumask *cpumask,
 				 const struct flush_tlb_info *info)
@@ -599,7 +685,7 @@
 	u8 state;
 	int cpu;
 	struct kvm_steal_time *src;
-	struct cpumask *flushmask = this_cpu_cpumask_var_ptr(__pv_tlb_mask);
+	struct cpumask *flushmask = this_cpu_cpumask_var_ptr(__pv_cpu_mask);
 
 	cpumask_copy(flushmask, cpumask);
 	/*
@@ -623,41 +709,49 @@
 {
 	int i;
 
-	if (!kvm_para_available())
-		return;
-
 	paravirt_ops_setup();
 	register_reboot_notifier(&kvm_pv_reboot_nb);
 	for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++)
 		raw_spin_lock_init(&async_pf_sleepers[i].lock);
-	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF))
-		x86_init.irqs.trap_init = kvm_apf_trap_init;
 
 	if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
 		has_steal_clock = 1;
-		pv_time_ops.steal_clock = kvm_steal_clock;
+		pv_ops.time.steal_clock = kvm_steal_clock;
 	}
 
-	if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
-	    !kvm_para_has_hint(KVM_HINTS_REALTIME) &&
-	    kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
-		pv_mmu_ops.flush_tlb_others = kvm_flush_tlb_others;
-		pv_mmu_ops.tlb_remove_table = tlb_remove_table;
+	if (pv_tlb_flush_supported()) {
+		pv_ops.mmu.flush_tlb_others = kvm_flush_tlb_others;
+		pv_ops.mmu.tlb_remove_table = tlb_remove_table;
+		pr_info("KVM setup pv remote TLB flush\n");
 	}
 
 	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
 		apic_set_eoi_write(kvm_guest_apic_eoi_write);
 
+	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF_INT) && kvmapf) {
+		static_branch_enable(&kvm_async_pf_enabled);
+		alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, asm_sysvec_kvm_asyncpf_interrupt);
+	}
+
 #ifdef CONFIG_SMP
-	smp_ops.smp_prepare_cpus = kvm_smp_prepare_cpus;
 	smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
+	if (pv_sched_yield_supported()) {
+		smp_ops.send_call_func_ipi = kvm_smp_send_call_func_ipi;
+		pr_info("setup PV sched yield\n");
+	}
 	if (cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/kvm:online",
 				      kvm_cpu_online, kvm_cpu_down_prepare) < 0)
-		pr_err("kvm_guest: Failed to install cpu hotplug callbacks\n");
+		pr_err("failed to install cpu hotplug callbacks\n");
 #else
 	sev_map_percpu_data();
 	kvm_guest_cpu_init();
 #endif
+
+#ifdef CONFIG_KEXEC_CORE
+	machine_ops.crash_shutdown = kvm_crash_shutdown;
+#endif
+
+	register_syscore_ops(&kvm_syscore_ops);
 
 	/*
 	 * Hard lockup detection is enabled by default. Disable it, as guests
@@ -703,6 +797,7 @@
 {
 	return cpuid_edx(kvm_cpuid_base() | KVM_CPUID_FEATURES);
 }
+EXPORT_SYMBOL_GPL(kvm_arch_para_hints);
 
 static uint32_t __init kvm_detect(void)
 {
@@ -712,7 +807,7 @@
 static void __init kvm_apic_init(void)
 {
 #if defined(CONFIG_SMP)
-	if (kvm_para_has_feature(KVM_FEATURE_PV_SEND_IPI))
+	if (pv_ipi_supported())
 		kvm_setup_pv_ipi();
 #endif
 }
@@ -723,13 +818,34 @@
 	x86_platform.apic_post_init = kvm_apic_init;
 }
 
+#if defined(CONFIG_AMD_MEM_ENCRYPT)
+static void kvm_sev_es_hcall_prepare(struct ghcb *ghcb, struct pt_regs *regs)
+{
+	/* RAX and CPL are already in the GHCB */
+	ghcb_set_rbx(ghcb, regs->bx);
+	ghcb_set_rcx(ghcb, regs->cx);
+	ghcb_set_rdx(ghcb, regs->dx);
+	ghcb_set_rsi(ghcb, regs->si);
+}
+
+static bool kvm_sev_es_hcall_finish(struct ghcb *ghcb, struct pt_regs *regs)
+{
+	/* No checking of the return state needed */
+	return true;
+}
+#endif
+
 const __initconst struct hypervisor_x86 x86_hyper_kvm = {
-	.name			= "KVM",
-	.detect			= kvm_detect,
-	.type			= X86_HYPER_KVM,
-	.init.guest_late_init	= kvm_guest_init,
-	.init.x2apic_available	= kvm_para_available,
-	.init.init_platform	= kvm_init_platform,
+	.name				= "KVM",
+	.detect				= kvm_detect,
+	.type				= X86_HYPER_KVM,
+	.init.guest_late_init		= kvm_guest_init,
+	.init.x2apic_available		= kvm_para_available,
+	.init.init_platform		= kvm_init_platform,
+#if defined(CONFIG_AMD_MEM_ENCRYPT)
+	.runtime.sev_es_hcall_prepare	= kvm_sev_es_hcall_prepare,
+	.runtime.sev_es_hcall_finish	= kvm_sev_es_hcall_finish,
+#endif
 };
 
 static __init int activate_jump_labels(void)
@@ -744,23 +860,31 @@
 }
 arch_initcall(activate_jump_labels);
 
-static __init int kvm_setup_pv_tlb_flush(void)
+static __init int kvm_alloc_cpumask(void)
 {
 	int cpu;
+	bool alloc = false;
 
-	if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
-	    !kvm_para_has_hint(KVM_HINTS_REALTIME) &&
-	    kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
+	if (!kvm_para_available() || nopv)
+		return 0;
+
+	if (pv_tlb_flush_supported())
+		alloc = true;
+
+#if defined(CONFIG_SMP)
+	if (pv_ipi_supported())
+		alloc = true;
+#endif
+
+	if (alloc)
 		for_each_possible_cpu(cpu) {
-			zalloc_cpumask_var_node(per_cpu_ptr(&__pv_tlb_mask, cpu),
+			zalloc_cpumask_var_node(per_cpu_ptr(&__pv_cpu_mask, cpu),
 				GFP_KERNEL, cpu_to_node(cpu));
 		}
-		pr_info("KVM setup pv remote TLB flush\n");
-	}
 
 	return 0;
 }
-arch_initcall(kvm_setup_pv_tlb_flush);
+arch_initcall(kvm_alloc_cpumask);
 
 #ifdef CONFIG_PARAVIRT_SPINLOCKS
 
@@ -829,7 +953,7 @@
 "movq	__per_cpu_offset(,%rdi,8), %rax;"
 "cmpb	$0, " __stringify(KVM_STEAL_TIME_preempted) "+steal_time(%rax);"
 "setne	%al;"
-"ret;"
+ASM_RET
 ".size __raw_callee_save___kvm_vcpu_is_preempted, .-__raw_callee_save___kvm_vcpu_is_preempted;"
 ".popsection");
 
@@ -840,29 +964,91 @@
  */
 void __init kvm_spinlock_init(void)
 {
-	if (!kvm_para_available())
+	/*
+	 * In case host doesn't support KVM_FEATURE_PV_UNHALT there is still an
+	 * advantage of keeping virt_spin_lock_key enabled: virt_spin_lock() is
+	 * preferred over native qspinlock when vCPU is preempted.
+	 */
+	if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT)) {
+		pr_info("PV spinlocks disabled, no host support\n");
 		return;
-	/* Does host kernel support KVM_FEATURE_PV_UNHALT? */
-	if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
-		return;
+	}
 
-	if (kvm_para_has_hint(KVM_HINTS_REALTIME))
-		return;
+	/*
+	 * Disable PV spinlocks and use native qspinlock when dedicated pCPUs
+	 * are available.
+	 */
+	if (kvm_para_has_hint(KVM_HINTS_REALTIME)) {
+		pr_info("PV spinlocks disabled with KVM_HINTS_REALTIME hints\n");
+		goto out;
+	}
 
-	/* Don't use the pvqspinlock code if there is only 1 vCPU. */
-	if (num_possible_cpus() == 1)
-		return;
+	if (num_possible_cpus() == 1) {
+		pr_info("PV spinlocks disabled, single CPU\n");
+		goto out;
+	}
+
+	if (nopvspin) {
+		pr_info("PV spinlocks disabled, forced by \"nopvspin\" parameter\n");
+		goto out;
+	}
+
+	pr_info("PV spinlocks enabled\n");
 
 	__pv_init_lock_hash();
-	pv_lock_ops.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
-	pv_lock_ops.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
-	pv_lock_ops.wait = kvm_wait;
-	pv_lock_ops.kick = kvm_kick_cpu;
+	pv_ops.lock.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
+	pv_ops.lock.queued_spin_unlock =
+		PV_CALLEE_SAVE(__pv_queued_spin_unlock);
+	pv_ops.lock.wait = kvm_wait;
+	pv_ops.lock.kick = kvm_kick_cpu;
 
 	if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
-		pv_lock_ops.vcpu_is_preempted =
+		pv_ops.lock.vcpu_is_preempted =
 			PV_CALLEE_SAVE(__kvm_vcpu_is_preempted);
 	}
+	/*
+	 * When PV spinlock is enabled which is preferred over
+	 * virt_spin_lock(), virt_spin_lock_key's value is meaningless.
+	 * Just disable it anyway.
+	 */
+out:
+	static_branch_disable(&virt_spin_lock_key);
 }
 
 #endif	/* CONFIG_PARAVIRT_SPINLOCKS */
+
+#ifdef CONFIG_ARCH_CPUIDLE_HALTPOLL
+
+static void kvm_disable_host_haltpoll(void *i)
+{
+	wrmsrl(MSR_KVM_POLL_CONTROL, 0);
+}
+
+static void kvm_enable_host_haltpoll(void *i)
+{
+	wrmsrl(MSR_KVM_POLL_CONTROL, 1);
+}
+
+void arch_haltpoll_enable(unsigned int cpu)
+{
+	if (!kvm_para_has_feature(KVM_FEATURE_POLL_CONTROL)) {
+		pr_err_once("host does not support poll control\n");
+		pr_err_once("host upgrade recommended\n");
+		return;
+	}
+
+	/* Enable guest halt poll disables host halt poll */
+	smp_call_function_single(cpu, kvm_disable_host_haltpoll, NULL, 1);
+}
+EXPORT_SYMBOL_GPL(arch_haltpoll_enable);
+
+void arch_haltpoll_disable(unsigned int cpu)
+{
+	if (!kvm_para_has_feature(KVM_FEATURE_POLL_CONTROL))
+		return;
+
+	/* Disable guest halt poll enables host halt poll */
+	smp_call_function_single(cpu, kvm_enable_host_haltpoll, NULL, 1);
+}
+EXPORT_SYMBOL_GPL(arch_haltpoll_disable);
+#endif