forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-05-11, commit 04dd17822334871b23ea2862f7798fb0e0007777
kernel/arch/powerpc/kvm/powerpc.c
@@ -1,16 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2, as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
  *
  * Copyright IBM Corp. 2007
  *
@@ -42,6 +31,7 @@
 #include <asm/hvcall.h>
 #include <asm/plpar_wrappers.h>
 #endif
+#include <asm/ultravisor.h>
 
 #include "timing.h"
 #include "irq.h"
@@ -289,7 +279,7 @@
 }
 EXPORT_SYMBOL_GPL(kvmppc_sanity_check);
 
-int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
+int kvmppc_emulate_mmio(struct kvm_vcpu *vcpu)
 {
 	enum emulation_result er;
 	int r;
@@ -305,7 +295,7 @@
 		r = RESUME_GUEST;
 		break;
 	case EMULATE_DO_MMIO:
-		run->exit_reason = KVM_EXIT_MMIO;
+		vcpu->run->exit_reason = KVM_EXIT_MMIO;
 		/* We must reload nonvolatiles because "update" load/store
 		 * instructions modify register state. */
 		/* Future optimization: only reload non-volatiles if they were
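Note: these two hunks belong to a series that drops the explicit struct kvm_run * parameter across the PPC KVM code. Every vcpu already carries a pointer to its shared run area, so passing both around was redundant and let the two drift apart. A minimal standalone sketch of the refactor pattern (types pared down for illustration; not kernel code):

    struct kvm_run  { int exit_reason; };
    struct kvm_vcpu { struct kvm_run *run; };

    /* before: int emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu);
     * after:  the callee recovers the run area from the vcpu itself */
    static int emulate_mmio(struct kvm_vcpu *vcpu)
    {
        vcpu->run->exit_reason = 6;     /* stands in for KVM_EXIT_MMIO */
        return 0;
    }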
@@ -336,9 +326,16 @@
 {
 	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
 	struct kvmppc_pte pte;
-	int r;
+	int r = -EINVAL;
 
 	vcpu->stat.st++;
+
+	if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->store_to_eaddr)
+		r = vcpu->kvm->arch.kvm_ops->store_to_eaddr(vcpu, eaddr, ptr,
+							    size);
+
+	if ((!r) || (r == -EAGAIN))
+		return r;
 
 	r = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
 			 XLATE_WRITE, &pte);
@@ -372,9 +369,16 @@
 {
 	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
 	struct kvmppc_pte pte;
-	int rc;
+	int rc = -EINVAL;
 
 	vcpu->stat.ld++;
+
+	if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->load_from_eaddr)
+		rc = vcpu->kvm->arch.kvm_ops->load_from_eaddr(vcpu, eaddr, ptr,
+							      size);
+
+	if ((!rc) || (rc == -EAGAIN))
+		return rc;
 
 	rc = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
 			  XLATE_READ, &pte);
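Note: kvmppc_st() and kvmppc_ld() gain the same shape here: the result defaults to -EINVAL, an optional per-VM hook (store_to_eaddr/load_from_eaddr) may override it, and only 0 or -EAGAIN short-circuits; any other value falls through to the generic translation path. A standalone sketch of that control flow (only the hook names and error codes come from the hunks; the scaffolding is invented):

    #include <errno.h>
    #include <stddef.h>

    struct ops { int (*load_from_eaddr)(unsigned long ea, void *ptr, int size); };

    static int generic_load(unsigned long ea, void *ptr, int size) { return 0; }

    static int do_load(const struct ops *ops, unsigned long ea, void *ptr, int size)
    {
        int rc = -EINVAL;                   /* default: no hook available */

        if (ops && ops->load_from_eaddr)
            rc = ops->load_from_eaddr(ea, ptr, size);
        if (rc == 0 || rc == -EAGAIN)       /* handled, or caller must retry */
            return rc;

        return generic_load(ea, ptr, size); /* otherwise take the slow path */
    }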
@@ -399,7 +403,10 @@
 		return EMULATE_DONE;
 	}
 
-	if (kvm_read_guest(vcpu->kvm, pte.raddr, ptr, size))
+	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+	rc = kvm_read_guest(vcpu->kvm, pte.raddr, ptr, size);
+	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
+	if (rc)
 		return EMULATE_DO_MMIO;
 
 	return EMULATE_DONE;
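Note: kvm_read_guest() walks the memslot array, which KVM protects with SRCU, so the hunk brackets the call with srcu_read_lock()/srcu_read_unlock() and tests the saved return code afterwards instead of calling inside the condition. The idiom in outline (kernel-flavoured sketch, not a standalone program):

    int idx, rc;

    idx = srcu_read_lock(&kvm->srcu);        /* pin the current memslot view */
    rc = kvm_read_guest(kvm, gpa, buf, len); /* may consult memslots */
    srcu_read_unlock(&kvm->srcu, idx);       /* let reclaim proceed */
    if (rc)
        return EMULATE_DO_MMIO;              /* punt the access to userspace */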
@@ -411,14 +418,14 @@
 	return 0;
 }
 
-int kvm_arch_hardware_setup(void)
+int kvm_arch_hardware_setup(void *opaque)
 {
 	return 0;
 }
 
-void kvm_arch_check_processor_compat(void *rtn)
+int kvm_arch_check_processor_compat(void *opaque)
 {
-	*(int *)rtn = kvmppc_core_check_processor_compat();
+	return kvmppc_core_check_processor_compat();
 }
 
 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
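Note: kvm_arch_check_processor_compat() used to receive a void * and cast it to int * purely to smuggle a result through smp_call_function()-style plumbing; the common code now takes the answer as an ordinary return value, and kvm_arch_hardware_setup() grows an opaque argument to match the generic signature. A minimal before/after sketch:

    /* before: result written through an untyped out-parameter */
    static void check_compat_old(void *rtn) { *(int *)rtn = 0; }

    /* after: an ordinary, type-checked return value */
    static int check_compat_new(void *opaque) { (void)opaque; return 0; }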
@@ -454,16 +461,6 @@
 	return -EINVAL;
 }
 
-bool kvm_arch_has_vcpu_debugfs(void)
-{
-	return false;
-}
-
-int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
-{
-	return 0;
-}
-
 void kvm_arch_destroy_vm(struct kvm *kvm)
 {
 	unsigned int i;
@@ -480,7 +477,7 @@
 #endif
 
 	kvm_for_each_vcpu(i, vcpu, kvm)
-		kvm_arch_vcpu_free(vcpu);
+		kvm_vcpu_destroy(vcpu);
 
 	mutex_lock(&kvm->lock);
 	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
@@ -523,13 +520,14 @@
 	case KVM_CAP_PPC_UNSET_IRQ:
 	case KVM_CAP_PPC_IRQ_LEVEL:
 	case KVM_CAP_ENABLE_CAP:
-	case KVM_CAP_ENABLE_CAP_VM:
 	case KVM_CAP_ONE_REG:
 	case KVM_CAP_IOEVENTFD:
 	case KVM_CAP_DEVICE_CTRL:
 	case KVM_CAP_IMMEDIATE_EXIT:
+	case KVM_CAP_SET_GUEST_DEBUG:
 		r = 1;
 		break;
+	case KVM_CAP_PPC_GUEST_DEBUG_SSTEP:
 	case KVM_CAP_PPC_PAIRED_SINGLES:
 	case KVM_CAP_PPC_OSI:
 	case KVM_CAP_PPC_GET_PVINFO:
@@ -562,6 +560,17 @@
 	case KVM_CAP_PPC_GET_CPU_CHAR:
 		r = 1;
 		break;
+#ifdef CONFIG_KVM_XIVE
+	case KVM_CAP_PPC_IRQ_XIVE:
+		/*
+		 * We need XIVE to be enabled on the platform (implies
+		 * a POWER9 processor) and the PowerNV platform, as
+		 * nested is not yet supported.
+		 */
+		r = xive_enabled() && !!cpu_has_feature(CPU_FTR_HVMODE) &&
+			kvmppc_xive_native_supported();
+		break;
+#endif
 
 	case KVM_CAP_PPC_ALLOC_HTAB:
 		r = hv_enabled;
@@ -602,7 +611,12 @@
 		r = !!(hv_enabled && radix_enabled());
 		break;
 	case KVM_CAP_PPC_MMU_HASH_V3:
-		r = !!(hv_enabled && cpu_has_feature(CPU_FTR_ARCH_300));
+		r = !!(hv_enabled && cpu_has_feature(CPU_FTR_ARCH_300) &&
+		       cpu_has_feature(CPU_FTR_HVMODE));
+		break;
+	case KVM_CAP_PPC_NESTED_HV:
+		r = !!(hv_enabled && kvmppc_hv_ops->enable_nested &&
+		       !kvmppc_hv_ops->enable_nested(NULL));
 		break;
 #endif
 	case KVM_CAP_SYNC_MMU:
@@ -630,9 +644,6 @@
 			r = num_present_cpus();
 		else
 			r = num_online_cpus();
-		break;
-	case KVM_CAP_NR_MEMSLOTS:
-		r = KVM_USER_MEM_SLOTS;
 		break;
 	case KVM_CAP_MAX_VCPUS:
 		r = KVM_MAX_VCPUS;
@@ -662,6 +673,12 @@
 			(hv_enabled && cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST));
 		break;
 #endif
+#if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
+	case KVM_CAP_PPC_SECURE_GUEST:
+		r = hv_enabled && kvmppc_hv_ops->enable_svm &&
+			!kvmppc_hv_ops->enable_svm(NULL);
+		break;
+#endif
 	default:
 		r = 0;
 		break;
@@ -676,16 +693,9 @@
 	return -EINVAL;
 }
 
-void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
-			   struct kvm_memory_slot *dont)
+void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
 {
-	kvmppc_core_free_memslot(kvm, free, dont);
-}
-
-int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
-			    unsigned long npages)
-{
-	return kvmppc_core_create_memslot(kvm, slot, npages);
+	kvmppc_core_free_memslot(kvm, slot);
 }
 
 int kvm_arch_prepare_memory_region(struct kvm *kvm,
@@ -693,16 +703,16 @@
 				   const struct kvm_userspace_memory_region *mem,
 				   enum kvm_mr_change change)
 {
-	return kvmppc_core_prepare_memory_region(kvm, memslot, mem);
+	return kvmppc_core_prepare_memory_region(kvm, memslot, mem, change);
 }
 
 void kvm_arch_commit_memory_region(struct kvm *kvm,
 				   const struct kvm_userspace_memory_region *mem,
-				   const struct kvm_memory_slot *old,
+				   struct kvm_memory_slot *old,
 				   const struct kvm_memory_slot *new,
 				   enum kvm_mr_change change)
 {
-	kvmppc_core_commit_memory_region(kvm, mem, old, new);
+	kvmppc_core_commit_memory_region(kvm, mem, old, new, change);
 }
 
 void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
@@ -711,51 +721,9 @@
 	kvmppc_core_flush_memslot(kvm, slot);
 }
 
-struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
+int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
 {
-	struct kvm_vcpu *vcpu;
-	vcpu = kvmppc_core_vcpu_create(kvm, id);
-	if (!IS_ERR(vcpu)) {
-		vcpu->arch.wqp = &vcpu->wq;
-		kvmppc_create_vcpu_debugfs(vcpu, id);
-	}
-	return vcpu;
-}
-
-void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
-{
-}
-
-void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
-{
-	/* Make sure we're not using the vcpu anymore */
-	hrtimer_cancel(&vcpu->arch.dec_timer);
-
-	kvmppc_remove_vcpu_debugfs(vcpu);
-
-	switch (vcpu->arch.irq_type) {
-	case KVMPPC_IRQ_MPIC:
-		kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu);
-		break;
-	case KVMPPC_IRQ_XICS:
-		if (xive_enabled())
-			kvmppc_xive_cleanup_vcpu(vcpu);
-		else
-			kvmppc_xics_free_icp(vcpu);
-		break;
-	}
-
-	kvmppc_core_vcpu_free(vcpu);
-}
-
-void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
-{
-	kvm_arch_vcpu_free(vcpu);
-}
-
-int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
-{
-	return kvmppc_core_pending_dec(vcpu);
+	return 0;
 }
 
 static enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
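Note: the allocation-centred kvm_arch_vcpu_create(kvm, id), the free/destroy pair, and the pending-timer helper all leave this spot because generic KVM now allocates the vcpu structure itself; the architecture keeps a cheap veto before allocation plus hooks that operate on an already-allocated vcpu (they reappear in reworked form further down). The resulting split, summarised as the signatures this diff introduces:

    /* veto point, runs before common code allocates anything */
    int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id);
    /* initialise an already-allocated vcpu */
    int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu);
    /* tear down; mirrors create */
    void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);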
@@ -768,9 +736,9 @@
 	return HRTIMER_NORESTART;
 }
 
-int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
+int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
 {
-	int ret;
+	int err;
 
 	hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
 	vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
@@ -779,14 +747,57 @@
 #ifdef CONFIG_KVM_EXIT_TIMING
 	mutex_init(&vcpu->arch.exit_timing_lock);
 #endif
-	ret = kvmppc_subarch_vcpu_init(vcpu);
-	return ret;
+	err = kvmppc_subarch_vcpu_init(vcpu);
+	if (err)
+		return err;
+
+	err = kvmppc_core_vcpu_create(vcpu);
+	if (err)
+		goto out_vcpu_uninit;
+
+	vcpu->arch.waitp = &vcpu->wait;
+	kvmppc_create_vcpu_debugfs(vcpu, vcpu->vcpu_id);
+	return 0;
+
+out_vcpu_uninit:
+	kvmppc_subarch_vcpu_uninit(vcpu);
+	return err;
 }
 
-void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
+void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
 {
-	kvmppc_mmu_destroy(vcpu);
+}
+
+void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
+{
+	/* Make sure we're not using the vcpu anymore */
+	hrtimer_cancel(&vcpu->arch.dec_timer);
+
+	kvmppc_remove_vcpu_debugfs(vcpu);
+
+	switch (vcpu->arch.irq_type) {
+	case KVMPPC_IRQ_MPIC:
+		kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu);
+		break;
+	case KVMPPC_IRQ_XICS:
+		if (xics_on_xive())
+			kvmppc_xive_cleanup_vcpu(vcpu);
+		else
+			kvmppc_xics_free_icp(vcpu);
+		break;
+	case KVMPPC_IRQ_XIVE:
+		kvmppc_xive_native_cleanup_vcpu(vcpu);
+		break;
+	}
+
+	kvmppc_core_vcpu_free(vcpu);
+
 	kvmppc_subarch_vcpu_uninit(vcpu);
+}
+
+int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
+{
+	return kvmppc_core_pending_dec(vcpu);
 }
 
 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
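Note: the rebuilt kvm_arch_vcpu_create() above uses the kernel's standard goto-unwind idiom: each fallible init step gets a label that undoes only what already succeeded, keeping the success path linear and the failure path in strict reverse order. Stripped-down sketch with stub steps (all names invented for illustration):

    static int  subarch_init(void)   { return 0; }
    static void subarch_uninit(void) { }
    static int  core_create(void)    { return 0; }

    static int vcpu_create(void)
    {
        int err;

        err = subarch_init();
        if (err)
            return err;          /* nothing to unwind yet */

        err = core_create();
        if (err)
            goto out_subarch;    /* undo step 1 only */

        return 0;

    out_subarch:
        subarch_uninit();
        return err;
    }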
@@ -1099,10 +1110,10 @@
 #define dp_to_sp(x)	(x)
 #endif /* CONFIG_PPC_FPU */
 
-static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
-				      struct kvm_run *run)
+static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu)
 {
-	u64 uninitialized_var(gpr);
+	struct kvm_run *run = vcpu->run;
+	u64 gpr;
 
 	if (run->mmio.len > sizeof(gpr)) {
 		printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
@@ -1198,15 +1209,24 @@
 		kvmppc_set_vmx_byte(vcpu, gpr);
 		break;
 #endif
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+	case KVM_MMIO_REG_NESTED_GPR:
+		if (kvmppc_need_byteswap(vcpu))
+			gpr = swab64(gpr);
+		kvm_vcpu_write_guest(vcpu, vcpu->arch.nested_io_gpr, &gpr,
+				     sizeof(gpr));
+		break;
+#endif
 	default:
 		BUG();
 	}
 }
 
-static int __kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
+static int __kvmppc_handle_load(struct kvm_vcpu *vcpu,
 				unsigned int rt, unsigned int bytes,
 				int is_default_endian, int sign_extend)
 {
+	struct kvm_run *run = vcpu->run;
 	int idx, ret;
 	bool host_swabbed;
 
@@ -1240,7 +1260,7 @@
 	srcu_read_unlock(&vcpu->kvm->srcu, idx);
 
 	if (!ret) {
-		kvmppc_complete_mmio_load(vcpu, run);
+		kvmppc_complete_mmio_load(vcpu);
 		vcpu->mmio_needed = 0;
 		return EMULATE_DONE;
 	}
@@ -1248,24 +1268,24 @@
 	return EMULATE_DO_MMIO;
 }
 
-int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
+int kvmppc_handle_load(struct kvm_vcpu *vcpu,
 		       unsigned int rt, unsigned int bytes,
 		       int is_default_endian)
 {
-	return __kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian, 0);
+	return __kvmppc_handle_load(vcpu, rt, bytes, is_default_endian, 0);
 }
 EXPORT_SYMBOL_GPL(kvmppc_handle_load);
 
 /* Same as above, but sign extends */
-int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
+int kvmppc_handle_loads(struct kvm_vcpu *vcpu,
 			unsigned int rt, unsigned int bytes,
 			int is_default_endian)
 {
-	return __kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian, 1);
+	return __kvmppc_handle_load(vcpu, rt, bytes, is_default_endian, 1);
 }
 
 #ifdef CONFIG_VSX
-int kvmppc_handle_vsx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
+int kvmppc_handle_vsx_load(struct kvm_vcpu *vcpu,
 			   unsigned int rt, unsigned int bytes,
 			   int is_default_endian, int mmio_sign_extend)
 {
@@ -1276,13 +1296,13 @@
 		return EMULATE_FAIL;
 
 	while (vcpu->arch.mmio_vsx_copy_nums) {
-		emulated = __kvmppc_handle_load(run, vcpu, rt, bytes,
+		emulated = __kvmppc_handle_load(vcpu, rt, bytes,
 			is_default_endian, mmio_sign_extend);
 
 		if (emulated != EMULATE_DONE)
 			break;
 
-		vcpu->arch.paddr_accessed += run->mmio.len;
+		vcpu->arch.paddr_accessed += vcpu->run->mmio.len;
 
 		vcpu->arch.mmio_vsx_copy_nums--;
 		vcpu->arch.mmio_vsx_offset++;
@@ -1291,9 +1311,10 @@
 }
 #endif /* CONFIG_VSX */
 
-int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
+int kvmppc_handle_store(struct kvm_vcpu *vcpu,
 			u64 val, unsigned int bytes, int is_default_endian)
 {
+	struct kvm_run *run = vcpu->run;
 	void *data = run->mmio.data;
 	int idx, ret;
 	bool host_swabbed;
@@ -1407,7 +1428,7 @@
 	return result;
 }
 
-int kvmppc_handle_vsx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
+int kvmppc_handle_vsx_store(struct kvm_vcpu *vcpu,
 			    int rs, unsigned int bytes, int is_default_endian)
 {
 	u64 val;
@@ -1423,13 +1444,13 @@
 		if (kvmppc_get_vsr_data(vcpu, rs, &val) == -1)
 			return EMULATE_FAIL;
 
-		emulated = kvmppc_handle_store(run, vcpu,
+		emulated = kvmppc_handle_store(vcpu,
 					       val, bytes, is_default_endian);
 
 		if (emulated != EMULATE_DONE)
 			break;
 
-		vcpu->arch.paddr_accessed += run->mmio.len;
+		vcpu->arch.paddr_accessed += vcpu->run->mmio.len;
 
 		vcpu->arch.mmio_vsx_copy_nums--;
 		vcpu->arch.mmio_vsx_offset++;
@@ -1438,19 +1459,19 @@
 	return emulated;
 }
 
-static int kvmppc_emulate_mmio_vsx_loadstore(struct kvm_vcpu *vcpu,
-					     struct kvm_run *run)
+static int kvmppc_emulate_mmio_vsx_loadstore(struct kvm_vcpu *vcpu)
 {
+	struct kvm_run *run = vcpu->run;
 	enum emulation_result emulated = EMULATE_FAIL;
 	int r;
 
 	vcpu->arch.paddr_accessed += run->mmio.len;
 
 	if (!vcpu->mmio_is_write) {
-		emulated = kvmppc_handle_vsx_load(run, vcpu, vcpu->arch.io_gpr,
+		emulated = kvmppc_handle_vsx_load(vcpu, vcpu->arch.io_gpr,
 			run->mmio.len, 1, vcpu->arch.mmio_sign_extend);
 	} else {
-		emulated = kvmppc_handle_vsx_store(run, vcpu,
+		emulated = kvmppc_handle_vsx_store(vcpu,
 			vcpu->arch.io_gpr, run->mmio.len, 1);
 	}
 
@@ -1474,22 +1495,22 @@
 #endif /* CONFIG_VSX */
 
 #ifdef CONFIG_ALTIVEC
-int kvmppc_handle_vmx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
+int kvmppc_handle_vmx_load(struct kvm_vcpu *vcpu,
 		unsigned int rt, unsigned int bytes, int is_default_endian)
 {
 	enum emulation_result emulated = EMULATE_DONE;
 
-	if (vcpu->arch.mmio_vsx_copy_nums > 2)
+	if (vcpu->arch.mmio_vmx_copy_nums > 2)
 		return EMULATE_FAIL;
 
 	while (vcpu->arch.mmio_vmx_copy_nums) {
-		emulated = __kvmppc_handle_load(run, vcpu, rt, bytes,
+		emulated = __kvmppc_handle_load(vcpu, rt, bytes,
 				is_default_endian, 0);
 
 		if (emulated != EMULATE_DONE)
 			break;
 
-		vcpu->arch.paddr_accessed += run->mmio.len;
+		vcpu->arch.paddr_accessed += vcpu->run->mmio.len;
 		vcpu->arch.mmio_vmx_copy_nums--;
 		vcpu->arch.mmio_vmx_offset++;
 	}
@@ -1569,14 +1590,14 @@
 	return result;
 }
 
-int kvmppc_handle_vmx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
+int kvmppc_handle_vmx_store(struct kvm_vcpu *vcpu,
 		unsigned int rs, unsigned int bytes, int is_default_endian)
 {
 	u64 val = 0;
 	unsigned int index = rs & KVM_MMIO_REG_MASK;
 	enum emulation_result emulated = EMULATE_DONE;
 
-	if (vcpu->arch.mmio_vsx_copy_nums > 2)
+	if (vcpu->arch.mmio_vmx_copy_nums > 2)
 		return EMULATE_FAIL;
 
 	vcpu->arch.io_gpr = rs;
@@ -1604,12 +1625,12 @@
 			return EMULATE_FAIL;
 		}
 
-		emulated = kvmppc_handle_store(run, vcpu, val, bytes,
+		emulated = kvmppc_handle_store(vcpu, val, bytes,
 				is_default_endian);
 		if (emulated != EMULATE_DONE)
 			break;
 
-		vcpu->arch.paddr_accessed += run->mmio.len;
+		vcpu->arch.paddr_accessed += vcpu->run->mmio.len;
 		vcpu->arch.mmio_vmx_copy_nums--;
 		vcpu->arch.mmio_vmx_offset++;
 	}
@@ -1617,19 +1638,19 @@
 	return emulated;
 }
 
-static int kvmppc_emulate_mmio_vmx_loadstore(struct kvm_vcpu *vcpu,
-					     struct kvm_run *run)
+static int kvmppc_emulate_mmio_vmx_loadstore(struct kvm_vcpu *vcpu)
 {
+	struct kvm_run *run = vcpu->run;
 	enum emulation_result emulated = EMULATE_FAIL;
 	int r;
 
 	vcpu->arch.paddr_accessed += run->mmio.len;
 
 	if (!vcpu->mmio_is_write) {
-		emulated = kvmppc_handle_vmx_load(run, vcpu,
+		emulated = kvmppc_handle_vmx_load(vcpu,
 				vcpu->arch.io_gpr, run->mmio.len, 1);
 	} else {
-		emulated = kvmppc_handle_vmx_store(run, vcpu,
+		emulated = kvmppc_handle_vmx_store(vcpu,
 				vcpu->arch.io_gpr, run->mmio.len, 1);
 	}
 
@@ -1749,8 +1770,9 @@
 	return r;
 }
 
-int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
+int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 {
+	struct kvm_run *run = vcpu->run;
 	int r;
 
 	vcpu_load(vcpu);
@@ -1758,7 +1780,7 @@
 	if (vcpu->mmio_needed) {
 		vcpu->mmio_needed = 0;
 		if (!vcpu->mmio_is_write)
-			kvmppc_complete_mmio_load(vcpu, run);
+			kvmppc_complete_mmio_load(vcpu);
 #ifdef CONFIG_VSX
 		if (vcpu->arch.mmio_vsx_copy_nums > 0) {
 			vcpu->arch.mmio_vsx_copy_nums--;
@@ -1766,7 +1788,7 @@
 		}
 
 		if (vcpu->arch.mmio_vsx_copy_nums > 0) {
-			r = kvmppc_emulate_mmio_vsx_loadstore(vcpu, run);
+			r = kvmppc_emulate_mmio_vsx_loadstore(vcpu);
 			if (r == RESUME_HOST) {
 				vcpu->mmio_needed = 1;
 				goto out;
@@ -1780,7 +1802,7 @@
 		}
 
 		if (vcpu->arch.mmio_vmx_copy_nums > 0) {
-			r = kvmppc_emulate_mmio_vmx_loadstore(vcpu, run);
+			r = kvmppc_emulate_mmio_vmx_loadstore(vcpu);
 			if (r == RESUME_HOST) {
 				vcpu->mmio_needed = 1;
 				goto out;
@@ -1813,7 +1835,7 @@
 	if (run->immediate_exit)
 		r = -EINTR;
 	else
-		r = kvmppc_vcpu_run(run, vcpu);
+		r = kvmppc_vcpu_run(vcpu);
 
 	kvm_sigset_deactivate(vcpu);
 
@@ -1913,7 +1935,7 @@
 	r = -EPERM;
 	dev = kvm_device_from_filp(f.file);
 	if (dev) {
-		if (xive_enabled())
+		if (xics_on_xive())
 			r = kvmppc_xive_connect_vcpu(dev, vcpu, cap->args[1]);
 		else
 			r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]);
@@ -1923,6 +1945,30 @@
 		break;
 	}
 #endif /* CONFIG_KVM_XICS */
+#ifdef CONFIG_KVM_XIVE
+	case KVM_CAP_PPC_IRQ_XIVE: {
+		struct fd f;
+		struct kvm_device *dev;
+
+		r = -EBADF;
+		f = fdget(cap->args[0]);
+		if (!f.file)
+			break;
+
+		r = -ENXIO;
+		if (!xive_enabled())
+			break;
+
+		r = -EPERM;
+		dev = kvm_device_from_filp(f.file);
+		if (dev)
+			r = kvmppc_xive_native_connect_vcpu(dev, vcpu,
+							    cap->args[1]);
+
+		fdput(f);
+		break;
+	}
+#endif /* CONFIG_KVM_XIVE */
 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 	case KVM_CAP_PPC_FWNMI:
 		r = -EINVAL;
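Note: the new KVM_CAP_PPC_IRQ_XIVE case mirrors the XICS case above it: pin the user-supplied descriptor with fdget(), fail with a specific errno at each stage (-EBADF, -ENXIO, -EPERM), resolve it to a kvm_device, then drop the reference with fdput(). Worth noticing: in this version the -ENXIO early break returns before fdput() runs. In outline (kernel-flavoured sketch; connect_vcpu() stands in for the XIVE/XICS connect helpers):

    r = -EBADF;
    f = fdget(cap->args[0]);            /* take a reference on the fd */
    if (!f.file)
        break;

    r = -EPERM;
    dev = kvm_device_from_filp(f.file); /* NULL unless it is a KVM device fd */
    if (dev)
        r = connect_vcpu(dev, vcpu);
    fdput(f);                           /* release the fdget() reference */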
@@ -2090,8 +2136,8 @@
 }
 
 
-static int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
-				   struct kvm_enable_cap *cap)
+int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
+			    struct kvm_enable_cap *cap)
 {
 	int r;
 
@@ -2125,6 +2171,22 @@
 		r = kvm->arch.kvm_ops->set_smt_mode(kvm, mode, flags);
 		break;
 	}
+
+	case KVM_CAP_PPC_NESTED_HV:
+		r = -EINVAL;
+		if (!is_kvmppc_hv_enabled(kvm) ||
+		    !kvm->arch.kvm_ops->enable_nested)
+			break;
+		r = kvm->arch.kvm_ops->enable_nested(kvm);
+		break;
+#endif
+#if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
+	case KVM_CAP_PPC_SECURE_GUEST:
+		r = -EINVAL;
+		if (!is_kvmppc_hv_enabled(kvm) || !kvm->arch.kvm_ops->enable_svm)
+			break;
+		r = kvm->arch.kvm_ops->enable_svm(kvm);
+		break;
 #endif
 	default:
 		r = -EINVAL;
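Note: both new VM-level capabilities follow one gating recipe: start from -EINVAL, refuse unless HV KVM is active and the backend actually implements the operation, then let the backend's return value become the ioctl result. The same null-checked function-pointer dispatch, reduced to a standalone sketch (the struct and helper are invented for illustration):

    #include <errno.h>
    #include <stddef.h>

    struct kvm_ops { int (*enable_nested)(void *kvm); };

    static int enable_cap_nested(const struct kvm_ops *ops, void *kvm, int hv_enabled)
    {
        int r = -EINVAL;                /* refused by default */

        if (!hv_enabled || !ops->enable_nested)
            return r;                   /* backend cannot do it */
        return ops->enable_nested(kvm); /* backend decides */
    }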
@@ -2163,10 +2225,12 @@
 			KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV |
 			KVM_PPC_CPU_CHAR_BR_HINT_HONOURED |
 			KVM_PPC_CPU_CHAR_MTTRIG_THR_RECONF |
-			KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS;
+			KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS |
+			KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST;
 		cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY |
 			KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR |
-			KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR;
+			KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR |
+			KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE;
 	}
 	return 0;
 }
@@ -2225,12 +2289,16 @@
 		if (have_fw_feat(fw_features, "enabled",
 				 "fw-count-cache-disabled"))
 			cp->character |= KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS;
+		if (have_fw_feat(fw_features, "enabled",
+				 "fw-count-cache-flush-bcctr2,0,0"))
+			cp->character |= KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST;
 		cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 |
 			KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED |
 			KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 |
 			KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 |
 			KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV |
-			KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS;
+			KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS |
+			KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST;
 
 		if (have_fw_feat(fw_features, "enabled",
 				 "speculation-policy-favor-security"))
@@ -2241,9 +2309,13 @@
 		if (!have_fw_feat(fw_features, "disabled",
 				  "needs-spec-barrier-for-bound-checks"))
 			cp->behaviour |= KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR;
+		if (have_fw_feat(fw_features, "enabled",
+				 "needs-count-cache-flush-on-context-switch"))
+			cp->behaviour |= KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE;
 		cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY |
 			KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR |
-			KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR;
+			KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR |
+			KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE;
 
 		of_node_put(fw_features);
 	}
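Note: the KVM_PPC_GET_CPU_CHAR plumbing follows one rule per property: a firmware node reported as "enabled" sets the corresponding character/behaviour bit, while the *_mask fields advertise every bit this kernel knows how to probe, so userspace must ignore anything outside the mask. The repeated three-line probe, abstracted (have_fw_feat() is the helper already used above; the property name and bit are placeholders):

    if (have_fw_feat(fw_features, "enabled", "some-feature"))
        cp->character |= SOME_CHAR_BIT;
    /* SOME_CHAR_BIT also belongs in cp->character_mask, assigned wholesale */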
@@ -2269,15 +2341,6 @@
 			goto out;
 		}
 
-		break;
-	}
-	case KVM_ENABLE_CAP:
-	{
-		struct kvm_enable_cap cap;
-		r = -EFAULT;
-		if (copy_from_user(&cap, argp, sizeof(cap)))
-			goto out;
-		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
 		break;
 	}
 #ifdef CONFIG_SPAPR_TCE_IOMMU
@@ -2362,6 +2425,16 @@
 		r = -EFAULT;
 		break;
 	}
+	case KVM_PPC_SVM_OFF: {
+		struct kvm *kvm = filp->private_data;
+
+		r = 0;
+		if (!kvm->arch.kvm_ops->svm_off)
+			goto out;
+
+		r = kvm->arch.kvm_ops->svm_off(kvm);
+		break;
+	}
 	default: {
 		struct kvm *kvm = filp->private_data;
 		r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg);