@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
  *
@@ -8,10 +9,6 @@
  * Description:
  * This file is derived from arch/powerpc/kvm/44x.c,
  * by Hollis Blanchard <hollisb@us.ibm.com>.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2, as
- * published by the Free Software Foundation.
  */
 
 #include <linux/kvm_host.h>
@@ -39,61 +36,40 @@
 #include "book3s.h"
 #include "trace.h"
 
-#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
-
 /* #define EXIT_DEBUG */
 
 struct kvm_stats_debugfs_item debugfs_entries[] = {
-	{ "exits", VCPU_STAT(sum_exits) },
-	{ "mmio", VCPU_STAT(mmio_exits) },
-	{ "sig", VCPU_STAT(signal_exits) },
-	{ "sysc", VCPU_STAT(syscall_exits) },
-	{ "inst_emu", VCPU_STAT(emulated_inst_exits) },
-	{ "dec", VCPU_STAT(dec_exits) },
-	{ "ext_intr", VCPU_STAT(ext_intr_exits) },
-	{ "queue_intr", VCPU_STAT(queue_intr) },
-	{ "halt_poll_success_ns", VCPU_STAT(halt_poll_success_ns) },
-	{ "halt_poll_fail_ns", VCPU_STAT(halt_poll_fail_ns) },
-	{ "halt_wait_ns", VCPU_STAT(halt_wait_ns) },
-	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll), },
-	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll), },
-	{ "halt_successful_wait", VCPU_STAT(halt_successful_wait) },
-	{ "halt_poll_invalid", VCPU_STAT(halt_poll_invalid) },
-	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
-	{ "pf_storage", VCPU_STAT(pf_storage) },
-	{ "sp_storage", VCPU_STAT(sp_storage) },
-	{ "pf_instruc", VCPU_STAT(pf_instruc) },
-	{ "sp_instruc", VCPU_STAT(sp_instruc) },
-	{ "ld", VCPU_STAT(ld) },
-	{ "ld_slow", VCPU_STAT(ld_slow) },
-	{ "st", VCPU_STAT(st) },
-	{ "st_slow", VCPU_STAT(st_slow) },
-	{ "pthru_all", VCPU_STAT(pthru_all) },
-	{ "pthru_host", VCPU_STAT(pthru_host) },
-	{ "pthru_bad_aff", VCPU_STAT(pthru_bad_aff) },
+	VCPU_STAT("exits", sum_exits),
+	VCPU_STAT("mmio", mmio_exits),
+	VCPU_STAT("sig", signal_exits),
+	VCPU_STAT("sysc", syscall_exits),
+	VCPU_STAT("inst_emu", emulated_inst_exits),
+	VCPU_STAT("dec", dec_exits),
+	VCPU_STAT("ext_intr", ext_intr_exits),
+	VCPU_STAT("queue_intr", queue_intr),
+	VCPU_STAT("halt_poll_success_ns", halt_poll_success_ns),
+	VCPU_STAT("halt_poll_fail_ns", halt_poll_fail_ns),
+	VCPU_STAT("halt_wait_ns", halt_wait_ns),
+	VCPU_STAT("halt_successful_poll", halt_successful_poll),
+	VCPU_STAT("halt_attempted_poll", halt_attempted_poll),
+	VCPU_STAT("halt_successful_wait", halt_successful_wait),
+	VCPU_STAT("halt_poll_invalid", halt_poll_invalid),
+	VCPU_STAT("halt_wakeup", halt_wakeup),
+	VCPU_STAT("pf_storage", pf_storage),
+	VCPU_STAT("sp_storage", sp_storage),
+	VCPU_STAT("pf_instruc", pf_instruc),
+	VCPU_STAT("sp_instruc", sp_instruc),
+	VCPU_STAT("ld", ld),
+	VCPU_STAT("ld_slow", ld_slow),
+	VCPU_STAT("st", st),
+	VCPU_STAT("st_slow", st_slow),
+	VCPU_STAT("pthru_all", pthru_all),
+	VCPU_STAT("pthru_host", pthru_host),
+	VCPU_STAT("pthru_bad_aff", pthru_bad_aff),
+	VM_STAT("largepages_2M", num_2M_pages, .mode = 0444),
+	VM_STAT("largepages_1G", num_1G_pages, .mode = 0444),
 	{ NULL }
 };
-
-void kvmppc_unfixup_split_real(struct kvm_vcpu *vcpu)
-{
-	if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) {
-		ulong pc = kvmppc_get_pc(vcpu);
-		ulong lr = kvmppc_get_lr(vcpu);
-		if ((pc & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS)
-			kvmppc_set_pc(vcpu, pc & ~SPLIT_HACK_MASK);
-		if ((lr & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS)
-			kvmppc_set_lr(vcpu, lr & ~SPLIT_HACK_MASK);
-		vcpu->arch.hflags &= ~BOOK3S_HFLAG_SPLIT_HACK;
-	}
-}
-EXPORT_SYMBOL_GPL(kvmppc_unfixup_split_real);
-
-static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu)
-{
-	if (!is_kvmppc_hv_enabled(vcpu->kvm))
-		return to_book3s(vcpu)->hior;
-	return 0;
-}
 
 static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
 			unsigned long pending_now, unsigned long old_pending)
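
Note: the local VCPU_STAT(x) wrapper above goes away because the generic headers now define name-taking VM_STAT()/VCPU_STAT() macros for all architectures. From memory, the generic definitions in include/linux/kvm_host.h at this point looked roughly like the sketch below; treat the exact form as an assumption rather than a quotation.

/* Sketch of the generic macros (reproduced from memory): */
#define VM_STAT(n, x, ...) \
	{ n, offsetof(struct kvm, stat.x), KVM_STAT_VM, ## __VA_ARGS__ }
#define VCPU_STAT(n, x, ...) \
	{ n, offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU, ## __VA_ARGS__ }

The trailing variadic arguments become extra designated initializers of struct kvm_stats_debugfs_item, which is how the two largepages entries get their read-only .mode = 0444.
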
@@ -134,11 +110,7 @@
 
 void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags)
 {
-	kvmppc_unfixup_split_real(vcpu);
-	kvmppc_set_srr0(vcpu, kvmppc_get_pc(vcpu));
-	kvmppc_set_srr1(vcpu, (kvmppc_get_msr(vcpu) & ~0x783f0000ul) | flags);
-	kvmppc_set_pc(vcpu, kvmppc_interrupt_offset(vcpu) + vec);
-	vcpu->arch.mmu.reset_msr(vcpu);
+	vcpu->kvm->arch.kvm_ops->inject_interrupt(vcpu, vec, flags);
 }
 
 static int kvmppc_book3s_vec2irqprio(unsigned int vec)
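
Note: the SRR0/SRR1 and split-real fixup logic moves into the PR and HV backends, and book3s.c now merely dispatches through the per-VM ops table. A backend would wire the new hook roughly as below; the callback name is assumed for illustration.

/* Sketch of the backend side (callback name assumed): */
static void kvmppc_inject_interrupt_pr(struct kvm_vcpu *vcpu, int vec, u64 flags);

static struct kvmppc_ops kvm_ops_pr = {
	/* ... */
	.inject_interrupt = kvmppc_inject_interrupt_pr,
	/* ... */
};
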
@@ -153,7 +125,6 @@
 	case 0x400: prio = BOOK3S_IRQPRIO_INST_STORAGE; break;
 	case 0x480: prio = BOOK3S_IRQPRIO_INST_SEGMENT; break;
 	case 0x500: prio = BOOK3S_IRQPRIO_EXTERNAL; break;
-	case 0x501: prio = BOOK3S_IRQPRIO_EXTERNAL_LEVEL; break;
 	case 0x600: prio = BOOK3S_IRQPRIO_ALIGNMENT; break;
 	case 0x700: prio = BOOK3S_IRQPRIO_PROGRAM; break;
 	case 0x800: prio = BOOK3S_IRQPRIO_FP_UNAVAIL; break;
@@ -192,6 +163,13 @@
 #endif
 }
 EXPORT_SYMBOL_GPL(kvmppc_book3s_queue_irqprio);
+
+void kvmppc_core_queue_machine_check(struct kvm_vcpu *vcpu, ulong flags)
+{
+	/* might as well deliver this straight away */
+	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_MACHINE_CHECK, flags);
+}
+EXPORT_SYMBOL_GPL(kvmppc_core_queue_machine_check);
 
 void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags)
 {
@@ -239,18 +217,35 @@
 void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
 				struct kvm_interrupt *irq)
 {
-	unsigned int vec = BOOK3S_INTERRUPT_EXTERNAL;
+	/*
+	 * This case (KVM_INTERRUPT_SET) should never actually arise for
+	 * a pseries guest (because pseries guests expect their interrupt
+	 * controllers to continue asserting an external interrupt request
+	 * until it is acknowledged at the interrupt controller), but is
+	 * included to avoid ABI breakage and potentially for other
+	 * sorts of guest.
+	 *
+	 * There is a subtlety here: HV KVM does not test the
+	 * external_oneshot flag in the code that synthesizes
+	 * external interrupts for the guest just before entering
+	 * the guest. That is OK even if userspace did do a
+	 * KVM_INTERRUPT_SET on a pseries guest vcpu, because the
+	 * caller (kvm_vcpu_ioctl_interrupt) does a kvm_vcpu_kick()
+	 * which ends up doing a smp_send_reschedule(), which will
+	 * pull the guest all the way out to the host, meaning that
+	 * we will call kvmppc_core_prepare_to_enter() before entering
+	 * the guest again, and that will handle the external_oneshot
+	 * flag correctly.
+	 */
+	if (irq->irq == KVM_INTERRUPT_SET)
+		vcpu->arch.external_oneshot = 1;
 
-	if (irq->irq == KVM_INTERRUPT_SET_LEVEL)
-		vec = BOOK3S_INTERRUPT_EXTERNAL_LEVEL;
-
-	kvmppc_book3s_queue_irqprio(vcpu, vec);
+	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL);
 }
 
 void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu)
 {
 	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL);
-	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL_LEVEL);
 }
 
 void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu, ulong dar,
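
Note: the path above is driven by the KVM_INTERRUPT vcpu ioctl from userspace. A minimal sketch, assuming vcpu_fd is an open KVM vcpu file descriptor:

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Assert a one-shot external interrupt on a vcpu (sketch). */
static int assert_external_irq(int vcpu_fd)
{
	struct kvm_interrupt irq = { .irq = KVM_INTERRUPT_SET };

	/* KVM_INTERRUPT_SET_LEVEL would instead keep the line asserted
	 * until userspace clears it with KVM_INTERRUPT_UNSET. */
	return ioctl(vcpu_fd, KVM_INTERRUPT, &irq);
}
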
@@ -281,7 +276,6 @@
 		vec = BOOK3S_INTERRUPT_DECREMENTER;
 		break;
 	case BOOK3S_IRQPRIO_EXTERNAL:
-	case BOOK3S_IRQPRIO_EXTERNAL_LEVEL:
 		deliver = (kvmppc_get_msr(vcpu) & MSR_EE) && !crit;
 		vec = BOOK3S_INTERRUPT_EXTERNAL;
 		break;
@@ -355,8 +349,16 @@
 	case BOOK3S_IRQPRIO_DECREMENTER:
 		/* DEC interrupts get cleared by mtdec */
 		return false;
-	case BOOK3S_IRQPRIO_EXTERNAL_LEVEL:
-		/* External interrupts get cleared by userspace */
+	case BOOK3S_IRQPRIO_EXTERNAL:
+		/*
+		 * External interrupts get cleared by userspace
+		 * except when set by the KVM_INTERRUPT ioctl with
+		 * KVM_INTERRUPT_SET (not KVM_INTERRUPT_SET_LEVEL).
+		 */
+		if (vcpu->arch.external_oneshot) {
+			vcpu->arch.external_oneshot = 0;
+			return true;
+		}
 		return false;
 	}
 
@@ -466,11 +468,6 @@
 }
 EXPORT_SYMBOL_GPL(kvmppc_load_last_inst);
 
-int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
-{
-	return 0;
-}
-
 int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu)
 {
 	return 0;
@@ -561,12 +558,12 @@
 
 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
 {
-	return -ENOTSUPP;
+	return -EOPNOTSUPP;
 }
 
 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
 {
-	return -ENOTSUPP;
+	return -EOPNOTSUPP;
 }
 
 int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id,
@@ -612,12 +609,24 @@
 			r = -ENXIO;
 			break;
 		}
-		if (xive_enabled())
+		if (xics_on_xive())
 			*val = get_reg_val(id, kvmppc_xive_get_icp(vcpu));
 		else
 			*val = get_reg_val(id, kvmppc_xics_get_icp(vcpu));
 		break;
 #endif /* CONFIG_KVM_XICS */
+#ifdef CONFIG_KVM_XIVE
+	case KVM_REG_PPC_VP_STATE:
+		if (!vcpu->arch.xive_vcpu) {
+			r = -ENXIO;
+			break;
+		}
+		if (xive_enabled())
+			r = kvmppc_xive_native_get_vp(vcpu, val);
+		else
+			r = -ENXIO;
+		break;
+#endif /* CONFIG_KVM_XIVE */
 	case KVM_REG_PPC_FSCR:
 		*val = get_reg_val(id, vcpu->arch.fscr);
 		break;
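
Note: userspace reaches the new KVM_REG_PPC_VP_STATE case through the one_reg interface. A minimal sketch for the save/migration side, assuming the register's documented two-u64 layout:

#include <linux/kvm.h>
#include <stdint.h>
#include <sys/ioctl.h>

/* Fetch the XIVE VP state of a vcpu (sketch). */
static int get_vp_state(int vcpu_fd, uint64_t vp_state[2])
{
	struct kvm_one_reg reg = {
		.id   = KVM_REG_PPC_VP_STATE,
		.addr = (uintptr_t)vp_state,
	};

	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}

The matching restore path uses KVM_SET_ONE_REG with the same id, which lands in the set_one_reg hunk further down.
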
@@ -685,12 +694,24 @@
 			r = -ENXIO;
 			break;
 		}
-		if (xive_enabled())
+		if (xics_on_xive())
 			r = kvmppc_xive_set_icp(vcpu, set_reg_val(id, *val));
 		else
 			r = kvmppc_xics_set_icp(vcpu, set_reg_val(id, *val));
 		break;
 #endif /* CONFIG_KVM_XICS */
+#ifdef CONFIG_KVM_XIVE
+	case KVM_REG_PPC_VP_STATE:
+		if (!vcpu->arch.xive_vcpu) {
+			r = -ENXIO;
+			break;
+		}
+		if (xive_enabled())
+			r = kvmppc_xive_native_set_vp(vcpu, val);
+		else
+			r = -ENXIO;
+		break;
+#endif /* CONFIG_KVM_XIVE */
 	case KVM_REG_PPC_FSCR:
 		vcpu->arch.fscr = set_reg_val(id, *val);
 		break;
@@ -734,9 +755,9 @@
 }
 EXPORT_SYMBOL_GPL(kvmppc_set_msr);
 
-int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
+int kvmppc_vcpu_run(struct kvm_vcpu *vcpu)
 {
-	return vcpu->kvm->arch.kvm_ops->vcpu_run(kvm_run, vcpu);
+	return vcpu->kvm->arch.kvm_ops->vcpu_run(vcpu);
 }
 
 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
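
Note: this follows the tree-wide cleanup that dropped the redundant kvm_run argument; the run area is allocated at vcpu creation and has been reachable as vcpu->run ever since, so callees fetch it themselves, roughly:

/* Inside a backend's vcpu_run implementation (sketch; name assumed): */
static int backend_vcpu_run(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;	/* formerly an explicit parameter */

	run->exit_reason = KVM_EXIT_UNKNOWN;
	/* ... enter the guest, fill in the real exit reason ... */
	return 0;
}
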
@@ -760,9 +781,9 @@
 	kvm_vcpu_kick(vcpu);
 }
 
-struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
+int kvmppc_core_vcpu_create(struct kvm_vcpu *vcpu)
 {
-	return kvm->arch.kvm_ops->vcpu_create(kvm, id);
+	return vcpu->kvm->arch.kvm_ops->vcpu_create(vcpu);
 }
 
 void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
@@ -775,21 +796,19 @@
 	return vcpu->kvm->arch.kvm_ops->check_requests(vcpu);
 }
 
+void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
+{
+
+}
+
 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
 {
 	return kvm->arch.kvm_ops->get_dirty_log(kvm, log);
 }
 
-void kvmppc_core_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
-			      struct kvm_memory_slot *dont)
+void kvmppc_core_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
 {
-	kvm->arch.kvm_ops->free_memslot(free, dont);
-}
-
-int kvmppc_core_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
-			       unsigned long npages)
-{
-	return kvm->arch.kvm_ops->create_memslot(slot, npages);
+	kvm->arch.kvm_ops->free_memslot(slot);
 }
 
 void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
@@ -799,21 +818,24 @@
 
 int kvmppc_core_prepare_memory_region(struct kvm *kvm,
 				struct kvm_memory_slot *memslot,
-				const struct kvm_userspace_memory_region *mem)
+				const struct kvm_userspace_memory_region *mem,
+				enum kvm_mr_change change)
 {
-	return kvm->arch.kvm_ops->prepare_memory_region(kvm, memslot, mem);
+	return kvm->arch.kvm_ops->prepare_memory_region(kvm, memslot, mem,
+							change);
 }
 
 void kvmppc_core_commit_memory_region(struct kvm *kvm,
 			const struct kvm_userspace_memory_region *mem,
 			const struct kvm_memory_slot *old,
-			const struct kvm_memory_slot *new)
+			const struct kvm_memory_slot *new,
+			enum kvm_mr_change change)
 {
-	kvm->arch.kvm_ops->commit_memory_region(kvm, mem, old, new);
+	kvm->arch.kvm_ops->commit_memory_region(kvm, mem, old, new, change);
 }
 
 int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
-			bool blockable)
+			unsigned flags)
 {
 	return kvm->arch.kvm_ops->unmap_hva_range(kvm, start, end);
 }
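
Note: the new change parameter is the generic memslot-update discriminator, letting backends distinguish slot creation from deletion, moves, and flag-only updates instead of re-deriving that themselves. For reference, the enum in include/linux/kvm_host.h:

enum kvm_mr_change {
	KVM_MR_CREATE,
	KVM_MR_DELETE,
	KVM_MR_MOVE,
	KVM_MR_FLAGS_ONLY,
};
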
@@ -828,14 +850,10 @@
 	return kvm->arch.kvm_ops->test_age_hva(kvm, hva);
 }
 
-void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
+int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
 {
 	kvm->arch.kvm_ops->set_spte_hva(kvm, hva, pte);
-}
-
-void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
-{
-	vcpu->kvm->arch.kvm_ops->mmu_destroy(vcpu);
+	return 0;
 }
 
 int kvmppc_core_init_vm(struct kvm *kvm)
@@ -858,6 +876,19 @@
 	kvmppc_rtas_tokens_free(kvm);
 	WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
 #endif
+
+#ifdef CONFIG_KVM_XICS
+	/*
+	 * Free the XIVE and XICS devices which are not directly freed by the
+	 * device 'release' method
+	 */
+	kfree(kvm->arch.xive_devices.native);
+	kvm->arch.xive_devices.native = NULL;
+	kfree(kvm->arch.xive_devices.xics_on_xive);
+	kvm->arch.xive_devices.xics_on_xive = NULL;
+	kfree(kvm->arch.xics_device);
+	kvm->arch.xics_device = NULL;
+#endif /* CONFIG_KVM_XICS */
 }
 
 int kvmppc_h_logical_ci_load(struct kvm_vcpu *vcpu)
@@ -961,7 +992,7 @@
 int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
 		bool line_status)
 {
-	if (xive_enabled())
+	if (xics_on_xive())
 		return kvmppc_xive_set_irq(kvm, irq_source_id, irq, level,
 					   line_status);
 	else
1014 | 1045 | |
---|
1015 | 1046 | #ifdef CONFIG_KVM_XICS |
---|
1016 | 1047 | #ifdef CONFIG_KVM_XIVE |
---|
1017 | | - if (xive_enabled()) { |
---|
| 1048 | + if (xics_on_xive()) { |
---|
1018 | 1049 | kvmppc_xive_init_module(); |
---|
1019 | 1050 | kvm_register_device_ops(&kvm_xive_ops, KVM_DEV_TYPE_XICS); |
---|
| 1051 | + if (kvmppc_xive_native_supported()) { |
---|
| 1052 | + kvmppc_xive_native_init_module(); |
---|
| 1053 | + kvm_register_device_ops(&kvm_xive_native_ops, |
---|
| 1054 | + KVM_DEV_TYPE_XIVE); |
---|
| 1055 | + } |
---|
1020 | 1056 | } else |
---|
1021 | 1057 | #endif |
---|
1022 | 1058 | kvm_register_device_ops(&kvm_xics_ops, KVM_DEV_TYPE_XICS); |
---|
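
Note: the xive_enabled() to xics_on_xive() switch in the XICS paths separates "the host has XIVE hardware" from "KVM should emulate XICS on top of XIVE", which matters once a native XIVE device type exists and once nested hosts (which see XIVE but cannot exploit it for their guests) are supported. From memory, the helper in arch/powerpc/include/asm/kvm_ppc.h is roughly the sketch below; treat the exact body as an assumption.

static inline bool xics_on_xive(void)
{
	/* XICS-on-XIVE needs real XIVE and does not work in a nested host */
	return xive_enabled() && !kvmhv_on_pseries();
}

This is also consistent with the KVM_REG_PPC_VP_STATE hunks above keeping a bare xive_enabled() test: they belong to the XIVE-native device rather than to XICS emulation.
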
@@ -1027,8 +1063,10 @@
 static void kvmppc_book3s_exit(void)
 {
 #ifdef CONFIG_KVM_XICS
-	if (xive_enabled())
+	if (xics_on_xive()) {
 		kvmppc_xive_exit_module();
+		kvmppc_xive_native_exit_module();
+	}
 #endif
 #ifdef CONFIG_KVM_BOOK3S_32_HANDLER
 	kvmppc_book3s_exit_pr();