@@ -1,16 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2, as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
  *
  * Copyright IBM Corp. 2007
  *
@@ -42,6 +31,7 @@
 #include <asm/hvcall.h>
 #include <asm/plpar_wrappers.h>
 #endif
+#include <asm/ultravisor.h>

 #include "timing.h"
 #include "irq.h"
@@ -289,7 +279,7 @@
 }
 EXPORT_SYMBOL_GPL(kvmppc_sanity_check);

-int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
+int kvmppc_emulate_mmio(struct kvm_vcpu *vcpu)
 {
 	enum emulation_result er;
 	int r;
@@ -305,7 +295,7 @@
 		r = RESUME_GUEST;
 		break;
 	case EMULATE_DO_MMIO:
-		run->exit_reason = KVM_EXIT_MMIO;
+		vcpu->run->exit_reason = KVM_EXIT_MMIO;
 		/* We must reload nonvolatiles because "update" load/store
 		 * instructions modify register state. */
 		/* Future optimization: only reload non-volatiles if they were
@@ -336,9 +326,16 @@
 {
 	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
 	struct kvmppc_pte pte;
-	int r;
+	int r = -EINVAL;

 	vcpu->stat.st++;
+
+	if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->store_to_eaddr)
+		r = vcpu->kvm->arch.kvm_ops->store_to_eaddr(vcpu, eaddr, ptr,
+							    size);
+
+	if ((!r) || (r == -EAGAIN))
+		return r;

 	r = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
 			 XLATE_WRITE, &pte);
@@ -372,9 +369,16 @@
 {
 	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
 	struct kvmppc_pte pte;
-	int rc;
+	int rc = -EINVAL;

 	vcpu->stat.ld++;
+
+	if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->load_from_eaddr)
+		rc = vcpu->kvm->arch.kvm_ops->load_from_eaddr(vcpu, eaddr, ptr,
+							      size);
+
+	if ((!rc) || (rc == -EAGAIN))
+		return rc;

 	rc = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
 			 XLATE_READ, &pte);
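The two hunks above give kvmppc_st() and kvmppc_ld() a fast path through optional per-VM kvm_ops callbacks before falling back to software address translation: a return of 0 or -EAGAIN from the hook is passed straight back to the caller, while any other error falls through to kvmppc_xlate(). A minimal sketch of a backend honouring that contract (the backend and its behaviour are hypothetical; the signature follows the call sites above):

	/* Hypothetical backend hook, wired up via struct kvmppc_ops. */
	static int my_load_from_eaddr(struct kvm_vcpu *vcpu, ulong *eaddr,
				      void *ptr, int size)
	{
		/*
		 * Return 0 (done) or -EAGAIN (retry) to short-circuit
		 * kvmppc_ld(); any other error makes the caller fall
		 * back to kvmppc_xlate().
		 */
		return -EINVAL;
	}

	static struct kvmppc_ops my_ops = {
		.load_from_eaddr = my_load_from_eaddr,
		/* .store_to_eaddr has the same shape, for kvmppc_st() */
	};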
@@ -399,7 +403,10 @@
 		return EMULATE_DONE;
 	}

-	if (kvm_read_guest(vcpu->kvm, pte.raddr, ptr, size))
+	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+	rc = kvm_read_guest(vcpu->kvm, pte.raddr, ptr, size);
+	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
+	if (rc)
 		return EMULATE_DO_MMIO;

 	return EMULATE_DONE;
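The hunk above wraps kvm_read_guest() in an SRCU read-side critical section: the memslot array it walks is protected by kvm->srcu, and this path could previously reach it without the lock held. The same idiom applies anywhere guest memory is touched outside the main vcpu run loop; a sketch (the helper name is invented for illustration):

	/* Memslot readers must hold kvm->srcu across the access. */
	static int read_guest_locked(struct kvm_vcpu *vcpu, gpa_t gpa,
				     void *buf, unsigned long len)
	{
		int idx, ret;

		idx = srcu_read_lock(&vcpu->kvm->srcu);
		ret = kvm_read_guest(vcpu->kvm, gpa, buf, len);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		return ret;
	}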
@@ -411,14 +418,14 @@
 	return 0;
 }

-int kvm_arch_hardware_setup(void)
+int kvm_arch_hardware_setup(void *opaque)
 {
 	return 0;
 }

-void kvm_arch_check_processor_compat(void *rtn)
+int kvm_arch_check_processor_compat(void *opaque)
 {
-	*(int *)rtn = kvmppc_core_check_processor_compat();
+	return kvmppc_core_check_processor_compat();
 }

 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
@@ -454,16 +461,6 @@
 		return -EINVAL;
 }

-bool kvm_arch_has_vcpu_debugfs(void)
-{
-	return false;
-}
-
-int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
-{
-	return 0;
-}
-
 void kvm_arch_destroy_vm(struct kvm *kvm)
 {
 	unsigned int i;
@@ -480,7 +477,7 @@
 #endif

 	kvm_for_each_vcpu(i, vcpu, kvm)
-		kvm_arch_vcpu_free(vcpu);
+		kvm_vcpu_destroy(vcpu);

 	mutex_lock(&kvm->lock);
 	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
@@ -523,13 +520,14 @@
 	case KVM_CAP_PPC_UNSET_IRQ:
 	case KVM_CAP_PPC_IRQ_LEVEL:
 	case KVM_CAP_ENABLE_CAP:
-	case KVM_CAP_ENABLE_CAP_VM:
 	case KVM_CAP_ONE_REG:
 	case KVM_CAP_IOEVENTFD:
 	case KVM_CAP_DEVICE_CTRL:
 	case KVM_CAP_IMMEDIATE_EXIT:
+	case KVM_CAP_SET_GUEST_DEBUG:
 		r = 1;
 		break;
+	case KVM_CAP_PPC_GUEST_DEBUG_SSTEP:
 	case KVM_CAP_PPC_PAIRED_SINGLES:
 	case KVM_CAP_PPC_OSI:
 	case KVM_CAP_PPC_GET_PVINFO:
@@ -562,6 +560,17 @@
 	case KVM_CAP_PPC_GET_CPU_CHAR:
 		r = 1;
 		break;
+#ifdef CONFIG_KVM_XIVE
+	case KVM_CAP_PPC_IRQ_XIVE:
+		/*
+		 * We need XIVE to be enabled on the platform (implies
+		 * a POWER9 processor) and the PowerNV platform, as
+		 * nested is not yet supported.
+		 */
+		r = xive_enabled() && !!cpu_has_feature(CPU_FTR_HVMODE) &&
+			kvmppc_xive_native_supported();
+		break;
+#endif

 	case KVM_CAP_PPC_ALLOC_HTAB:
 		r = hv_enabled;
@@ -602,7 +611,12 @@
 		r = !!(hv_enabled && radix_enabled());
 		break;
 	case KVM_CAP_PPC_MMU_HASH_V3:
-		r = !!(hv_enabled && cpu_has_feature(CPU_FTR_ARCH_300));
+		r = !!(hv_enabled && cpu_has_feature(CPU_FTR_ARCH_300) &&
+		       cpu_has_feature(CPU_FTR_HVMODE));
+		break;
+	case KVM_CAP_PPC_NESTED_HV:
+		r = !!(hv_enabled && kvmppc_hv_ops->enable_nested &&
+		       !kvmppc_hv_ops->enable_nested(NULL));
 		break;
 #endif
 	case KVM_CAP_SYNC_MMU:
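Note the convention in the new KVM_CAP_PPC_NESTED_HV check: enable_nested() doubles as a probe. Called with a NULL kvm it only answers "could this be enabled?", so the capability is advertised when the probe returns 0; called later with a real VM (from kvm_vm_ioctl_enable_cap(), further down in this diff) it actually flips the switch. A sketch of a callback following that contract (the feature test and flag are illustrative only):

	static int my_enable_nested(struct kvm *kvm)
	{
		if (!cpu_has_feature(CPU_FTR_ARCH_300))
			return -ENODEV;	/* hardware cannot do it */
		if (!kvm)
			return 0;	/* NULL kvm: capability probe only */
		kvm->arch.nested_enable = true;
		return 0;
	}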
@@ -630,9 +644,6 @@
 			r = num_present_cpus();
 	else
 			r = num_online_cpus();
-		break;
-	case KVM_CAP_NR_MEMSLOTS:
-		r = KVM_USER_MEM_SLOTS;
 		break;
 	case KVM_CAP_MAX_VCPUS:
 		r = KVM_MAX_VCPUS;
@@ -662,6 +673,12 @@
 		     (hv_enabled && cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST));
 		break;
 #endif
+#if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
+	case KVM_CAP_PPC_SECURE_GUEST:
+		r = hv_enabled && kvmppc_hv_ops->enable_svm &&
+			!kvmppc_hv_ops->enable_svm(NULL);
+		break;
+#endif
 	default:
 		r = 0;
 		break;
@@ -676,16 +693,9 @@
 		return -EINVAL;
 }

-void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
-			   struct kvm_memory_slot *dont)
+void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
 {
-	kvmppc_core_free_memslot(kvm, free, dont);
-}
-
-int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
-			    unsigned long npages)
-{
-	return kvmppc_core_create_memslot(kvm, slot, npages);
+	kvmppc_core_free_memslot(kvm, slot);
 }

 int kvm_arch_prepare_memory_region(struct kvm *kvm,
@@ -693,16 +703,16 @@
 				   const struct kvm_userspace_memory_region *mem,
 				   enum kvm_mr_change change)
 {
-	return kvmppc_core_prepare_memory_region(kvm, memslot, mem);
+	return kvmppc_core_prepare_memory_region(kvm, memslot, mem, change);
 }

 void kvm_arch_commit_memory_region(struct kvm *kvm,
 				   const struct kvm_userspace_memory_region *mem,
-				   const struct kvm_memory_slot *old,
+				   struct kvm_memory_slot *old,
 				   const struct kvm_memory_slot *new,
 				   enum kvm_mr_change change)
 {
-	kvmppc_core_commit_memory_region(kvm, mem, old, new);
+	kvmppc_core_commit_memory_region(kvm, mem, old, new, change);
 }

 void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
@@ -711,51 +721,9 @@
 	kvmppc_core_flush_memslot(kvm, slot);
 }

-struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
+int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
 {
-	struct kvm_vcpu *vcpu;
-	vcpu = kvmppc_core_vcpu_create(kvm, id);
-	if (!IS_ERR(vcpu)) {
-		vcpu->arch.wqp = &vcpu->wq;
-		kvmppc_create_vcpu_debugfs(vcpu, id);
-	}
-	return vcpu;
-}
-
-void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
-{
-}
-
-void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
-{
-	/* Make sure we're not using the vcpu anymore */
-	hrtimer_cancel(&vcpu->arch.dec_timer);
-
-	kvmppc_remove_vcpu_debugfs(vcpu);
-
-	switch (vcpu->arch.irq_type) {
-	case KVMPPC_IRQ_MPIC:
-		kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu);
-		break;
-	case KVMPPC_IRQ_XICS:
-		if (xive_enabled())
-			kvmppc_xive_cleanup_vcpu(vcpu);
-		else
-			kvmppc_xics_free_icp(vcpu);
-		break;
-	}
-
-	kvmppc_core_vcpu_free(vcpu);
-}
-
-void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
-{
-	kvm_arch_vcpu_free(vcpu);
-}
-
-int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
-{
-	return kvmppc_core_pending_dec(vcpu);
+	return 0;
 }

 static enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
@@ -768,9 +736,9 @@
 	return HRTIMER_NORESTART;
 }

-int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
+int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
 {
-	int ret;
+	int err;

 	hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
 	vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
@@ -779,14 +747,57 @@
 #ifdef CONFIG_KVM_EXIT_TIMING
 	mutex_init(&vcpu->arch.exit_timing_lock);
 #endif
-	ret = kvmppc_subarch_vcpu_init(vcpu);
-	return ret;
+	err = kvmppc_subarch_vcpu_init(vcpu);
+	if (err)
+		return err;
+
+	err = kvmppc_core_vcpu_create(vcpu);
+	if (err)
+		goto out_vcpu_uninit;
+
+	vcpu->arch.waitp = &vcpu->wait;
+	kvmppc_create_vcpu_debugfs(vcpu, vcpu->vcpu_id);
+	return 0;
+
+out_vcpu_uninit:
+	kvmppc_subarch_vcpu_uninit(vcpu);
+	return err;
 }

-void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
+void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
 {
-	kvmppc_mmu_destroy(vcpu);
+}
+
+void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
+{
+	/* Make sure we're not using the vcpu anymore */
+	hrtimer_cancel(&vcpu->arch.dec_timer);
+
+	kvmppc_remove_vcpu_debugfs(vcpu);
+
+	switch (vcpu->arch.irq_type) {
+	case KVMPPC_IRQ_MPIC:
+		kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu);
+		break;
+	case KVMPPC_IRQ_XICS:
+		if (xics_on_xive())
+			kvmppc_xive_cleanup_vcpu(vcpu);
+		else
+			kvmppc_xics_free_icp(vcpu);
+		break;
+	case KVMPPC_IRQ_XIVE:
+		kvmppc_xive_native_cleanup_vcpu(vcpu);
+		break;
+	}
+
+	kvmppc_core_vcpu_free(vcpu);
+
 	kvmppc_subarch_vcpu_uninit(vcpu);
+}
+
+int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
+{
+	return kvmppc_core_pending_dec(vcpu);
 }

 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
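This region tracks the cross-architecture vcpu-lifetime rework: common KVM code now allocates the vcpu and calls a veto hook before it exists (kvm_arch_vcpu_precreate()), an init hook on the pre-allocated structure (kvm_arch_vcpu_create(), with goto-style unwinding in reverse order of setup), and a teardown hook (kvm_arch_vcpu_destroy()) that absorbs the old kvm_arch_vcpu_free(). A simplified sketch of the common-code sequence these hooks plug into (condensed from the shape of virt/kvm/kvm_main.c, omitting locking and fd setup):

	static int create_vcpu(struct kvm *kvm, unsigned int id)
	{
		struct kvm_vcpu *vcpu;
		int r;

		r = kvm_arch_vcpu_precreate(kvm, id);	/* no vcpu yet */
		if (r)
			return r;

		vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
		if (!vcpu)
			return -ENOMEM;

		r = kvm_arch_vcpu_create(vcpu);		/* arch-level init */
		if (r) {
			kmem_cache_free(kvm_vcpu_cache, vcpu);
			return r;
		}

		kvm_arch_vcpu_postcreate(vcpu);		/* vcpu now visible */
		return 0;
	}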
@@ -1099,10 +1110,10 @@
 #define dp_to_sp(x)	(x)
 #endif /* CONFIG_PPC_FPU */

-static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
-				      struct kvm_run *run)
+static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu)
 {
-	u64 uninitialized_var(gpr);
+	struct kvm_run *run = vcpu->run;
+	u64 gpr;

 	if (run->mmio.len > sizeof(gpr)) {
 		printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
@@ -1198,15 +1209,24 @@
 		kvmppc_set_vmx_byte(vcpu, gpr);
 		break;
 #endif
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+	case KVM_MMIO_REG_NESTED_GPR:
+		if (kvmppc_need_byteswap(vcpu))
+			gpr = swab64(gpr);
+		kvm_vcpu_write_guest(vcpu, vcpu->arch.nested_io_gpr, &gpr,
+				     sizeof(gpr));
+		break;
+#endif
 	default:
 		BUG();
 	}
 }

-static int __kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
+static int __kvmppc_handle_load(struct kvm_vcpu *vcpu,
 				unsigned int rt, unsigned int bytes,
 				int is_default_endian, int sign_extend)
 {
+	struct kvm_run *run = vcpu->run;
 	int idx, ret;
 	bool host_swabbed;

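This hunk and most of the ones that follow are a single mechanical refactoring: struct kvm_run is always reachable as vcpu->run, so threading a separate run argument through every MMIO helper was redundant (and a source of mismatched pairs). Converted functions take only the vcpu and recover the run area locally:

	/* The helper name is hypothetical; the pattern is the one used
	 * throughout the converted functions below. */
	static void example_emulation_helper(struct kvm_vcpu *vcpu)
	{
		struct kvm_run *run = vcpu->run;

		run->exit_reason = KVM_EXIT_MMIO;
	}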
@@ -1240,7 +1260,7 @@
 	srcu_read_unlock(&vcpu->kvm->srcu, idx);

 	if (!ret) {
-		kvmppc_complete_mmio_load(vcpu, run);
+		kvmppc_complete_mmio_load(vcpu);
 		vcpu->mmio_needed = 0;
 		return EMULATE_DONE;
 	}
@@ -1248,24 +1268,24 @@
 	return EMULATE_DO_MMIO;
 }

-int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
+int kvmppc_handle_load(struct kvm_vcpu *vcpu,
 		       unsigned int rt, unsigned int bytes,
 		       int is_default_endian)
 {
-	return __kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian, 0);
+	return __kvmppc_handle_load(vcpu, rt, bytes, is_default_endian, 0);
 }
 EXPORT_SYMBOL_GPL(kvmppc_handle_load);

 /* Same as above, but sign extends */
-int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
+int kvmppc_handle_loads(struct kvm_vcpu *vcpu,
 			unsigned int rt, unsigned int bytes,
 			int is_default_endian)
 {
-	return __kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian, 1);
+	return __kvmppc_handle_load(vcpu, rt, bytes, is_default_endian, 1);
 }

 #ifdef CONFIG_VSX
-int kvmppc_handle_vsx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
+int kvmppc_handle_vsx_load(struct kvm_vcpu *vcpu,
 			unsigned int rt, unsigned int bytes,
 			int is_default_endian, int mmio_sign_extend)
 {
@@ -1276,13 +1296,13 @@
 		return EMULATE_FAIL;

 	while (vcpu->arch.mmio_vsx_copy_nums) {
-		emulated = __kvmppc_handle_load(run, vcpu, rt, bytes,
+		emulated = __kvmppc_handle_load(vcpu, rt, bytes,
 			is_default_endian, mmio_sign_extend);

 		if (emulated != EMULATE_DONE)
 			break;

-		vcpu->arch.paddr_accessed += run->mmio.len;
+		vcpu->arch.paddr_accessed += vcpu->run->mmio.len;

 		vcpu->arch.mmio_vsx_copy_nums--;
 		vcpu->arch.mmio_vsx_offset++;
@@ -1291,9 +1311,10 @@
 }
 #endif /* CONFIG_VSX */

-int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
+int kvmppc_handle_store(struct kvm_vcpu *vcpu,
 			u64 val, unsigned int bytes, int is_default_endian)
 {
+	struct kvm_run *run = vcpu->run;
 	void *data = run->mmio.data;
 	int idx, ret;
 	bool host_swabbed;
@@ -1407,7 +1428,7 @@
 	return result;
 }

-int kvmppc_handle_vsx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
+int kvmppc_handle_vsx_store(struct kvm_vcpu *vcpu,
 			int rs, unsigned int bytes, int is_default_endian)
 {
 	u64 val;
@@ -1423,13 +1444,13 @@
 		if (kvmppc_get_vsr_data(vcpu, rs, &val) == -1)
 			return EMULATE_FAIL;

-		emulated = kvmppc_handle_store(run, vcpu,
+		emulated = kvmppc_handle_store(vcpu,
 			 val, bytes, is_default_endian);

 		if (emulated != EMULATE_DONE)
 			break;

-		vcpu->arch.paddr_accessed += run->mmio.len;
+		vcpu->arch.paddr_accessed += vcpu->run->mmio.len;

 		vcpu->arch.mmio_vsx_copy_nums--;
 		vcpu->arch.mmio_vsx_offset++;
@@ -1438,19 +1459,19 @@
 	return emulated;
 }

-static int kvmppc_emulate_mmio_vsx_loadstore(struct kvm_vcpu *vcpu,
-		struct kvm_run *run)
+static int kvmppc_emulate_mmio_vsx_loadstore(struct kvm_vcpu *vcpu)
 {
+	struct kvm_run *run = vcpu->run;
 	enum emulation_result emulated = EMULATE_FAIL;
 	int r;

 	vcpu->arch.paddr_accessed += run->mmio.len;

 	if (!vcpu->mmio_is_write) {
-		emulated = kvmppc_handle_vsx_load(run, vcpu, vcpu->arch.io_gpr,
+		emulated = kvmppc_handle_vsx_load(vcpu, vcpu->arch.io_gpr,
 			run->mmio.len, 1, vcpu->arch.mmio_sign_extend);
 	} else {
-		emulated = kvmppc_handle_vsx_store(run, vcpu,
+		emulated = kvmppc_handle_vsx_store(vcpu,
 			vcpu->arch.io_gpr, run->mmio.len, 1);
 	}

@@ -1474,22 +1495,22 @@
 #endif /* CONFIG_VSX */

 #ifdef CONFIG_ALTIVEC
-int kvmppc_handle_vmx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
+int kvmppc_handle_vmx_load(struct kvm_vcpu *vcpu,
 		unsigned int rt, unsigned int bytes, int is_default_endian)
 {
 	enum emulation_result emulated = EMULATE_DONE;

-	if (vcpu->arch.mmio_vsx_copy_nums > 2)
+	if (vcpu->arch.mmio_vmx_copy_nums > 2)
 		return EMULATE_FAIL;

 	while (vcpu->arch.mmio_vmx_copy_nums) {
-		emulated = __kvmppc_handle_load(run, vcpu, rt, bytes,
+		emulated = __kvmppc_handle_load(vcpu, rt, bytes,
 			is_default_endian, 0);

 		if (emulated != EMULATE_DONE)
 			break;

-		vcpu->arch.paddr_accessed += run->mmio.len;
+		vcpu->arch.paddr_accessed += vcpu->run->mmio.len;
 		vcpu->arch.mmio_vmx_copy_nums--;
 		vcpu->arch.mmio_vmx_offset++;
 	}
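Besides dropping the run argument, the hunk above fixes a copy-paste bug: the Altivec (VMX) load handler bounds-checked mmio_vsx_copy_nums, the VSX counter, so the guard never constrained VMX emulation; kvmppc_handle_vmx_store() below gets the same fix. The bound itself follows from the access size:

	/*
	 * A 128-bit Altivec access is emulated as at most two 64-bit MMIO
	 * copies, so a mmio_vmx_copy_nums above 2 indicates corrupted
	 * emulation state and is rejected rather than processed.
	 */
	if (vcpu->arch.mmio_vmx_copy_nums > 2)
		return EMULATE_FAIL;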
@@ -1569,14 +1590,14 @@
 	return result;
 }

-int kvmppc_handle_vmx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
+int kvmppc_handle_vmx_store(struct kvm_vcpu *vcpu,
 		unsigned int rs, unsigned int bytes, int is_default_endian)
 {
 	u64 val = 0;
 	unsigned int index = rs & KVM_MMIO_REG_MASK;
 	enum emulation_result emulated = EMULATE_DONE;

-	if (vcpu->arch.mmio_vsx_copy_nums > 2)
+	if (vcpu->arch.mmio_vmx_copy_nums > 2)
 		return EMULATE_FAIL;

 	vcpu->arch.io_gpr = rs;
@@ -1604,12 +1625,12 @@
 			return EMULATE_FAIL;
 		}

-		emulated = kvmppc_handle_store(run, vcpu, val, bytes,
+		emulated = kvmppc_handle_store(vcpu, val, bytes,
 					       is_default_endian);
 		if (emulated != EMULATE_DONE)
 			break;

-		vcpu->arch.paddr_accessed += run->mmio.len;
+		vcpu->arch.paddr_accessed += vcpu->run->mmio.len;
 		vcpu->arch.mmio_vmx_copy_nums--;
 		vcpu->arch.mmio_vmx_offset++;
 	}
@@ -1617,19 +1638,19 @@
 	return emulated;
 }

-static int kvmppc_emulate_mmio_vmx_loadstore(struct kvm_vcpu *vcpu,
-		struct kvm_run *run)
+static int kvmppc_emulate_mmio_vmx_loadstore(struct kvm_vcpu *vcpu)
 {
+	struct kvm_run *run = vcpu->run;
 	enum emulation_result emulated = EMULATE_FAIL;
 	int r;

 	vcpu->arch.paddr_accessed += run->mmio.len;

 	if (!vcpu->mmio_is_write) {
-		emulated = kvmppc_handle_vmx_load(run, vcpu,
+		emulated = kvmppc_handle_vmx_load(vcpu,
 				vcpu->arch.io_gpr, run->mmio.len, 1);
 	} else {
-		emulated = kvmppc_handle_vmx_store(run, vcpu,
+		emulated = kvmppc_handle_vmx_store(vcpu,
 				vcpu->arch.io_gpr, run->mmio.len, 1);
 	}

@@ -1749,8 +1770,9 @@
 	return r;
 }

-int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
+int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 {
+	struct kvm_run *run = vcpu->run;
 	int r;

 	vcpu_load(vcpu);
@@ -1758,7 +1780,7 @@
 	if (vcpu->mmio_needed) {
 		vcpu->mmio_needed = 0;
 		if (!vcpu->mmio_is_write)
-			kvmppc_complete_mmio_load(vcpu, run);
+			kvmppc_complete_mmio_load(vcpu);
 #ifdef CONFIG_VSX
 		if (vcpu->arch.mmio_vsx_copy_nums > 0) {
 			vcpu->arch.mmio_vsx_copy_nums--;
@@ -1766,7 +1788,7 @@
 		}

 		if (vcpu->arch.mmio_vsx_copy_nums > 0) {
-			r = kvmppc_emulate_mmio_vsx_loadstore(vcpu, run);
+			r = kvmppc_emulate_mmio_vsx_loadstore(vcpu);
 			if (r == RESUME_HOST) {
 				vcpu->mmio_needed = 1;
 				goto out;
@@ -1780,7 +1802,7 @@
 	}

 	if (vcpu->arch.mmio_vmx_copy_nums > 0) {
-		r = kvmppc_emulate_mmio_vmx_loadstore(vcpu, run);
+		r = kvmppc_emulate_mmio_vmx_loadstore(vcpu);
 		if (r == RESUME_HOST) {
 			vcpu->mmio_needed = 1;
 			goto out;
@@ -1813,7 +1835,7 @@
 	if (run->immediate_exit)
 		r = -EINTR;
 	else
-		r = kvmppc_vcpu_run(run, vcpu);
+		r = kvmppc_vcpu_run(vcpu);

 	kvm_sigset_deactivate(vcpu);

@@ -1913,7 +1935,7 @@
 		r = -EPERM;
 		dev = kvm_device_from_filp(f.file);
 		if (dev) {
-			if (xive_enabled())
+			if (xics_on_xive())
 				r = kvmppc_xive_connect_vcpu(dev, vcpu, cap->args[1]);
 			else
 				r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]);
@@ -1923,6 +1945,30 @@
 		break;
 	}
 #endif /* CONFIG_KVM_XICS */
+#ifdef CONFIG_KVM_XIVE
+	case KVM_CAP_PPC_IRQ_XIVE: {
+		struct fd f;
+		struct kvm_device *dev;
+
+		r = -EBADF;
+		f = fdget(cap->args[0]);
+		if (!f.file)
+			break;
+
+		r = -ENXIO;
+		if (!xive_enabled())
+			break;
+
+		r = -EPERM;
+		dev = kvm_device_from_filp(f.file);
+		if (dev)
+			r = kvmppc_xive_native_connect_vcpu(dev, vcpu,
+							    cap->args[1]);
+
+		fdput(f);
+		break;
+	}
+#endif /* CONFIG_KVM_XIVE */
 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 	case KVM_CAP_PPC_FWNMI:
 		r = -EINVAL;
@@ -2090,8 +2136,8 @@
 }


-static int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
-				   struct kvm_enable_cap *cap)
+int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
+			    struct kvm_enable_cap *cap)
 {
 	int r;

@@ -2125,6 +2171,22 @@
 		r = kvm->arch.kvm_ops->set_smt_mode(kvm, mode, flags);
 		break;
 	}
+
+	case KVM_CAP_PPC_NESTED_HV:
+		r = -EINVAL;
+		if (!is_kvmppc_hv_enabled(kvm) ||
+		    !kvm->arch.kvm_ops->enable_nested)
+			break;
+		r = kvm->arch.kvm_ops->enable_nested(kvm);
+		break;
+#endif
+#if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
+	case KVM_CAP_PPC_SECURE_GUEST:
+		r = -EINVAL;
+		if (!is_kvmppc_hv_enabled(kvm) || !kvm->arch.kvm_ops->enable_svm)
+			break;
+		r = kvm->arch.kvm_ops->enable_svm(kvm);
+		break;
 #endif
 	default:
 		r = -EINVAL;
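kvm_vm_ioctl_enable_cap() loses its static qualifier because KVM_ENABLE_CAP became a common-code VM ioctl, which is also why the arch-local KVM_ENABLE_CAP case disappears from kvm_arch_vm_ioctl() further down. The new VM capabilities above are switched on through that path; a userspace sketch, assuming vm_fd was obtained with ioctl(kvm_fd, KVM_CREATE_VM, 0):

	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	/* Ask KVM to let this VM run nested HV guests. */
	static int enable_nested_hv(int vm_fd)
	{
		struct kvm_enable_cap cap;

		memset(&cap, 0, sizeof(cap));
		cap.cap = KVM_CAP_PPC_NESTED_HV;
		return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
	}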
@@ -2163,10 +2225,12 @@
 			KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV |
 			KVM_PPC_CPU_CHAR_BR_HINT_HONOURED |
 			KVM_PPC_CPU_CHAR_MTTRIG_THR_RECONF |
-			KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS;
+			KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS |
+			KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST;
 		cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY |
 			KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR |
-			KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR;
+			KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR |
+			KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE;
 	}
 	return 0;
 }
@@ -2225,12 +2289,16 @@
 	if (have_fw_feat(fw_features, "enabled",
 			 "fw-count-cache-disabled"))
 		cp->character |= KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS;
+	if (have_fw_feat(fw_features, "enabled",
+			 "fw-count-cache-flush-bcctr2,0,0"))
+		cp->character |= KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST;
 	cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 |
 		KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED |
 		KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 |
 		KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 |
 		KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV |
-		KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS;
+		KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS |
+		KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST;

 	if (have_fw_feat(fw_features, "enabled",
 			 "speculation-policy-favor-security"))
@@ -2241,9 +2309,13 @@
 	if (!have_fw_feat(fw_features, "disabled",
 			  "needs-spec-barrier-for-bound-checks"))
 		cp->behaviour |= KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR;
+	if (have_fw_feat(fw_features, "enabled",
+			 "needs-count-cache-flush-on-context-switch"))
+		cp->behaviour |= KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE;
 	cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY |
 		KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR |
-		KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR;
+		KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR |
+		KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE;

 	of_node_put(fw_features);
 }
@@ -2269,15 +2341,6 @@
 			goto out;
 		}

-		break;
-	}
-	case KVM_ENABLE_CAP:
-	{
-		struct kvm_enable_cap cap;
-		r = -EFAULT;
-		if (copy_from_user(&cap, argp, sizeof(cap)))
-			goto out;
-		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
 		break;
 	}
 #ifdef CONFIG_SPAPR_TCE_IOMMU
@@ -2362,6 +2425,16 @@
 			r = -EFAULT;
 		break;
 	}
+	case KVM_PPC_SVM_OFF: {
+		struct kvm *kvm = filp->private_data;
+
+		r = 0;
+		if (!kvm->arch.kvm_ops->svm_off)
+			goto out;
+
+		r = kvm->arch.kvm_ops->svm_off(kvm);
+		break;
+	}
 	default: {
 		struct kvm *kvm = filp->private_data;
 		r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg);
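KVM_PPC_SVM_OFF is a VM ioctl with no payload: it asks the Ultravisor-backed secure-guest machinery (note the asm/ultravisor.h include added at the top of this diff) to transition a secure VM back to normal state, typically from a VMM reset path. A userspace sketch under the same vm_fd assumption as above:

	/* Revert a secure guest to normal state, e.g. on reboot. */
	static int svm_off(int vm_fd)
	{
		return ioctl(vm_fd, KVM_PPC_SVM_OFF, 0);
	}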