.. | .. |
---|
| 1 | +// SPDX-License-Identifier: GPL-2.0-only |
---|
1 | 2 | /* |
---|
2 | 3 | * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved. |
---|
3 | 4 | * |
---|
.. | .. |
---|
13 | 14 | * |
---|
14 | 15 | * This file is derived from arch/powerpc/kvm/44x.c, |
---|
15 | 16 | * by Hollis Blanchard <hollisb@us.ibm.com>. |
---|
16 | | - * |
---|
17 | | - * This program is free software; you can redistribute it and/or modify |
---|
18 | | - * it under the terms of the GNU General Public License, version 2, as |
---|
19 | | - * published by the Free Software Foundation. |
---|
20 | 17 | */ |
---|
21 | 18 | |
---|
22 | 19 | #include <linux/kvm_host.h> |
---|
.. | .. |
---|
93 | 90 | kvmppc_set_pc(vcpu, pc | SPLIT_HACK_OFFS); |
---|
94 | 91 | } |
---|
95 | 92 | |
---|
96 | | -void kvmppc_unfixup_split_real(struct kvm_vcpu *vcpu); |
---|
| 93 | +static void kvmppc_unfixup_split_real(struct kvm_vcpu *vcpu) |
---|
| 94 | +{ |
---|
| 95 | + if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) { |
---|
| 96 | + ulong pc = kvmppc_get_pc(vcpu); |
---|
| 97 | + ulong lr = kvmppc_get_lr(vcpu); |
---|
| 98 | + if ((pc & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS) |
---|
| 99 | + kvmppc_set_pc(vcpu, pc & ~SPLIT_HACK_MASK); |
---|
| 100 | + if ((lr & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS) |
---|
| 101 | + kvmppc_set_lr(vcpu, lr & ~SPLIT_HACK_MASK); |
---|
| 102 | + vcpu->arch.hflags &= ~BOOK3S_HFLAG_SPLIT_HACK; |
---|
| 103 | + } |
---|
| 104 | +} |
---|
| 105 | + |
---|
| 106 | +static void kvmppc_inject_interrupt_pr(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags) |
---|
| 107 | +{ |
---|
| 108 | + unsigned long msr, pc, new_msr, new_pc; |
---|
| 109 | + |
---|
| 110 | + kvmppc_unfixup_split_real(vcpu); |
---|
| 111 | + |
---|
| 112 | + msr = kvmppc_get_msr(vcpu); |
---|
| 113 | + pc = kvmppc_get_pc(vcpu); |
---|
| 114 | + new_msr = vcpu->arch.intr_msr; |
---|
| 115 | + new_pc = to_book3s(vcpu)->hior + vec; |
---|
| 116 | + |
---|
| 117 | +#ifdef CONFIG_PPC_BOOK3S_64 |
---|
| 118 | + /* If transactional, change to suspend mode on IRQ delivery */ |
---|
| 119 | + if (MSR_TM_TRANSACTIONAL(msr)) |
---|
| 120 | + new_msr |= MSR_TS_S; |
---|
| 121 | + else |
---|
| 122 | + new_msr |= msr & MSR_TS_MASK; |
---|
| 123 | +#endif |
---|
| 124 | + |
---|
| 125 | + kvmppc_set_srr0(vcpu, pc); |
---|
| 126 | + kvmppc_set_srr1(vcpu, (msr & SRR1_MSR_BITS) | srr1_flags); |
---|
| 127 | + kvmppc_set_pc(vcpu, new_pc); |
---|
| 128 | + kvmppc_set_msr(vcpu, new_msr); |
---|
| 129 | +} |
---|
97 | 130 | |
---|
98 | 131 | static void kvmppc_core_vcpu_load_pr(struct kvm_vcpu *vcpu, int cpu) |
---|
99 | 132 | { |
---|
.. | .. |
---|
536 | 569 | #endif |
---|
537 | 570 | } |
---|
538 | 571 | |
---|
539 | | -void kvmppc_set_pvr_pr(struct kvm_vcpu *vcpu, u32 pvr) |
---|
| 572 | +static void kvmppc_set_pvr_pr(struct kvm_vcpu *vcpu, u32 pvr) |
---|
540 | 573 | { |
---|
541 | 574 | u32 host_pvr; |
---|
542 | 575 | |
---|
.. | .. |
---|
587 | 620 | case PVR_POWER8: |
---|
588 | 621 | case PVR_POWER8E: |
---|
589 | 622 | case PVR_POWER8NVL: |
---|
| 623 | + case PVR_POWER9: |
---|
590 | 624 | vcpu->arch.hflags |= BOOK3S_HFLAG_MULTI_PGSIZE | |
---|
591 | 625 | BOOK3S_HFLAG_NEW_TLBIE; |
---|
592 | 626 | break; |
---|
.. | .. |
---|
666 | 700 | return kvm_is_visible_gfn(vcpu->kvm, gpa >> PAGE_SHIFT); |
---|
667 | 701 | } |
---|
668 | 702 | |
---|
669 | | -int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu, |
---|
| 703 | +static int kvmppc_handle_pagefault(struct kvm_vcpu *vcpu, |
---|
670 | 704 | ulong eaddr, int vec) |
---|
671 | 705 | { |
---|
672 | 706 | bool data = (vec == BOOK3S_INTERRUPT_DATA_STORAGE); |
---|
.. | .. |
---|
706 | 740 | (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) && |
---|
707 | 741 | ((pte.raddr & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS)) |
---|
708 | 742 | pte.raddr &= ~SPLIT_HACK_MASK; |
---|
709 | | - /* fall through */ |
---|
| 743 | + fallthrough; |
---|
710 | 744 | case MSR_IR: |
---|
711 | 745 | vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid); |
---|
712 | 746 | |
---|
.. | .. |
---|
761 | 795 | /* The guest's PTE is not mapped yet. Map on the host */ |
---|
762 | 796 | if (kvmppc_mmu_map_page(vcpu, &pte, iswrite) == -EIO) { |
---|
763 | 797 | /* Exit KVM if mapping failed */ |
---|
764 | | - run->exit_reason = KVM_EXIT_INTERNAL_ERROR; |
---|
| 798 | + vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; |
---|
765 | 799 | return RESUME_HOST; |
---|
766 | 800 | } |
---|
767 | 801 | if (data) |
---|
.. | .. |
---|
774 | 808 | vcpu->stat.mmio_exits++; |
---|
775 | 809 | vcpu->arch.paddr_accessed = pte.raddr; |
---|
776 | 810 | vcpu->arch.vaddr_accessed = pte.eaddr; |
---|
777 | | - r = kvmppc_emulate_mmio(run, vcpu); |
---|
| 811 | + r = kvmppc_emulate_mmio(vcpu); |
---|
778 | 812 | if ( r == RESUME_HOST_NV ) |
---|
779 | 813 | r = RESUME_HOST; |
---|
780 | 814 | } |
---|
.. | .. |
---|
958 | 992 | enum emulation_result er = EMULATE_FAIL; |
---|
959 | 993 | |
---|
960 | 994 | if (!(kvmppc_get_msr(vcpu) & MSR_PR)) |
---|
961 | | - er = kvmppc_emulate_instruction(vcpu->run, vcpu); |
---|
| 995 | + er = kvmppc_emulate_instruction(vcpu); |
---|
962 | 996 | |
---|
963 | 997 | if ((er != EMULATE_DONE) && (er != EMULATE_AGAIN)) { |
---|
964 | 998 | /* Couldn't emulate, trigger interrupt in guest */ |
---|
.. | .. |
---|
1055 | 1089 | } |
---|
1056 | 1090 | } |
---|
1057 | 1091 | |
---|
1058 | | -static int kvmppc_exit_pr_progint(struct kvm_run *run, struct kvm_vcpu *vcpu, |
---|
1059 | | - unsigned int exit_nr) |
---|
| 1092 | +static int kvmppc_exit_pr_progint(struct kvm_vcpu *vcpu, unsigned int exit_nr) |
---|
1060 | 1093 | { |
---|
1061 | 1094 | enum emulation_result er; |
---|
1062 | 1095 | ulong flags; |
---|
.. | .. |
---|
1090 | 1123 | } |
---|
1091 | 1124 | |
---|
1092 | 1125 | vcpu->stat.emulated_inst_exits++; |
---|
1093 | | - er = kvmppc_emulate_instruction(run, vcpu); |
---|
| 1126 | + er = kvmppc_emulate_instruction(vcpu); |
---|
1094 | 1127 | switch (er) { |
---|
1095 | 1128 | case EMULATE_DONE: |
---|
1096 | 1129 | r = RESUME_GUEST_NV; |
---|
.. | .. |
---|
1105 | 1138 | r = RESUME_GUEST; |
---|
1106 | 1139 | break; |
---|
1107 | 1140 | case EMULATE_DO_MMIO: |
---|
1108 | | - run->exit_reason = KVM_EXIT_MMIO; |
---|
| 1141 | + vcpu->run->exit_reason = KVM_EXIT_MMIO; |
---|
1109 | 1142 | r = RESUME_HOST_NV; |
---|
1110 | 1143 | break; |
---|
1111 | 1144 | case EMULATE_EXIT_USER: |
---|
.. | .. |
---|
1118 | 1151 | return r; |
---|
1119 | 1152 | } |
---|
1120 | 1153 | |
---|
1121 | | -int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu, |
---|
1122 | | - unsigned int exit_nr) |
---|
| 1154 | +int kvmppc_handle_exit_pr(struct kvm_vcpu *vcpu, unsigned int exit_nr) |
---|
1123 | 1155 | { |
---|
| 1156 | + struct kvm_run *run = vcpu->run; |
---|
1124 | 1157 | int r = RESUME_HOST; |
---|
1125 | 1158 | int s; |
---|
1126 | 1159 | |
---|
.. | .. |
---|
1164 | 1197 | /* only care about PTEG not found errors, but leave NX alone */ |
---|
1165 | 1198 | if (shadow_srr1 & 0x40000000) { |
---|
1166 | 1199 | int idx = srcu_read_lock(&vcpu->kvm->srcu); |
---|
1167 | | - r = kvmppc_handle_pagefault(run, vcpu, kvmppc_get_pc(vcpu), exit_nr); |
---|
| 1200 | + r = kvmppc_handle_pagefault(vcpu, kvmppc_get_pc(vcpu), exit_nr); |
---|
1168 | 1201 | srcu_read_unlock(&vcpu->kvm->srcu, idx); |
---|
1169 | 1202 | vcpu->stat.sp_instruc++; |
---|
1170 | 1203 | } else if (vcpu->arch.mmu.is_dcbz32(vcpu) && |
---|
.. | .. |
---|
1214 | 1247 | */ |
---|
1215 | 1248 | if (fault_dsisr & (DSISR_NOHPTE | DSISR_PROTFAULT)) { |
---|
1216 | 1249 | int idx = srcu_read_lock(&vcpu->kvm->srcu); |
---|
1217 | | - r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr); |
---|
| 1250 | + r = kvmppc_handle_pagefault(vcpu, dar, exit_nr); |
---|
1218 | 1251 | srcu_read_unlock(&vcpu->kvm->srcu, idx); |
---|
1219 | 1252 | } else { |
---|
1220 | 1253 | kvmppc_core_queue_data_storage(vcpu, dar, fault_dsisr); |
---|
.. | .. |
---|
1246 | 1279 | r = RESUME_GUEST; |
---|
1247 | 1280 | break; |
---|
1248 | 1281 | case BOOK3S_INTERRUPT_EXTERNAL: |
---|
1249 | | - case BOOK3S_INTERRUPT_EXTERNAL_LEVEL: |
---|
1250 | 1282 | case BOOK3S_INTERRUPT_EXTERNAL_HV: |
---|
1251 | 1283 | case BOOK3S_INTERRUPT_H_VIRT: |
---|
1252 | 1284 | vcpu->stat.ext_intr_exits++; |
---|
.. | .. |
---|
1259 | 1291 | break; |
---|
1260 | 1292 | case BOOK3S_INTERRUPT_PROGRAM: |
---|
1261 | 1293 | case BOOK3S_INTERRUPT_H_EMUL_ASSIST: |
---|
1262 | | - r = kvmppc_exit_pr_progint(run, vcpu, exit_nr); |
---|
| 1294 | + r = kvmppc_exit_pr_progint(vcpu, exit_nr); |
---|
1263 | 1295 | break; |
---|
1264 | 1296 | case BOOK3S_INTERRUPT_SYSCALL: |
---|
1265 | 1297 | { |
---|
.. | .. |
---|
1337 | 1369 | emul = kvmppc_get_last_inst(vcpu, INST_GENERIC, |
---|
1338 | 1370 | &last_inst); |
---|
1339 | 1371 | if (emul == EMULATE_DONE) |
---|
1340 | | - r = kvmppc_exit_pr_progint(run, vcpu, exit_nr); |
---|
| 1372 | + r = kvmppc_exit_pr_progint(vcpu, exit_nr); |
---|
1341 | 1373 | else |
---|
1342 | 1374 | r = RESUME_GUEST; |
---|
1343 | 1375 | |
---|
.. | .. |
---|
1711 | 1743 | return r; |
---|
1712 | 1744 | } |
---|
1713 | 1745 | |
---|
1714 | | -static struct kvm_vcpu *kvmppc_core_vcpu_create_pr(struct kvm *kvm, |
---|
1715 | | - unsigned int id) |
---|
| 1746 | +static int kvmppc_core_vcpu_create_pr(struct kvm_vcpu *vcpu) |
---|
1716 | 1747 | { |
---|
1717 | 1748 | struct kvmppc_vcpu_book3s *vcpu_book3s; |
---|
1718 | | - struct kvm_vcpu *vcpu; |
---|
1719 | | - int err = -ENOMEM; |
---|
1720 | 1749 | unsigned long p; |
---|
| 1750 | + int err; |
---|
1721 | 1751 | |
---|
1722 | | - vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL); |
---|
1723 | | - if (!vcpu) |
---|
1724 | | - goto out; |
---|
| 1752 | + err = -ENOMEM; |
---|
1725 | 1753 | |
---|
1726 | 1754 | vcpu_book3s = vzalloc(sizeof(struct kvmppc_vcpu_book3s)); |
---|
1727 | 1755 | if (!vcpu_book3s) |
---|
1728 | | - goto free_vcpu; |
---|
| 1756 | + goto out; |
---|
1729 | 1757 | vcpu->arch.book3s = vcpu_book3s; |
---|
1730 | 1758 | |
---|
1731 | 1759 | #ifdef CONFIG_KVM_BOOK3S_32_HANDLER |
---|
.. | .. |
---|
1735 | 1763 | goto free_vcpu3s; |
---|
1736 | 1764 | #endif |
---|
1737 | 1765 | |
---|
1738 | | - err = kvm_vcpu_init(vcpu, kvm, id); |
---|
1739 | | - if (err) |
---|
1740 | | - goto free_shadow_vcpu; |
---|
1741 | | - |
---|
1742 | | - err = -ENOMEM; |
---|
1743 | 1766 | p = __get_free_page(GFP_KERNEL|__GFP_ZERO); |
---|
1744 | 1767 | if (!p) |
---|
1745 | | - goto uninit_vcpu; |
---|
| 1768 | + goto free_shadow_vcpu; |
---|
1746 | 1769 | vcpu->arch.shared = (void *)p; |
---|
1747 | 1770 | #ifdef CONFIG_PPC_BOOK3S_64 |
---|
1748 | 1771 | /* Always start the shared struct in native endian mode */ |
---|
.. | .. |
---|
1764 | 1787 | #else |
---|
1765 | 1788 | /* default to book3s_32 (750) */ |
---|
1766 | 1789 | vcpu->arch.pvr = 0x84202; |
---|
| 1790 | + vcpu->arch.intr_msr = 0; |
---|
1767 | 1791 | #endif |
---|
1768 | 1792 | kvmppc_set_pvr_pr(vcpu, vcpu->arch.pvr); |
---|
1769 | 1793 | vcpu->arch.slb_nr = 64; |
---|
1770 | 1794 | |
---|
1771 | 1795 | vcpu->arch.shadow_msr = MSR_USER64 & ~MSR_LE; |
---|
1772 | 1796 | |
---|
1773 | | - err = kvmppc_mmu_init(vcpu); |
---|
| 1797 | + err = kvmppc_mmu_init_pr(vcpu); |
---|
1774 | 1798 | if (err < 0) |
---|
1775 | 1799 | goto free_shared_page; |
---|
1776 | 1800 | |
---|
1777 | | - return vcpu; |
---|
| 1801 | + return 0; |
---|
1778 | 1802 | |
---|
1779 | 1803 | free_shared_page: |
---|
1780 | 1804 | free_page((unsigned long)vcpu->arch.shared); |
---|
1781 | | -uninit_vcpu: |
---|
1782 | | - kvm_vcpu_uninit(vcpu); |
---|
1783 | 1805 | free_shadow_vcpu: |
---|
1784 | 1806 | #ifdef CONFIG_KVM_BOOK3S_32_HANDLER |
---|
1785 | 1807 | kfree(vcpu->arch.shadow_vcpu); |
---|
1786 | 1808 | free_vcpu3s: |
---|
1787 | 1809 | #endif |
---|
1788 | 1810 | vfree(vcpu_book3s); |
---|
1789 | | -free_vcpu: |
---|
1790 | | - kmem_cache_free(kvm_vcpu_cache, vcpu); |
---|
1791 | 1811 | out: |
---|
1792 | | - return ERR_PTR(err); |
---|
| 1812 | + return err; |
---|
1793 | 1813 | } |
---|
1794 | 1814 | |
---|
/*
 * Free all per-vcpu state allocated by kvmppc_core_vcpu_create_pr().
 * Teardown order mirrors creation in reverse: MMU context first, then
 * the shared page, the 32-bit shadow vcpu (if configured), and finally
 * the book3s struct itself.
 */
static void kvmppc_core_vcpu_free_pr(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);

	kvmppc_mmu_destroy_pr(vcpu);
	/*
	 * NOTE(review): the shared pointer is masked with PAGE_MASK before
	 * freeing — presumably its low bits carry flags; confirm against
	 * the allocation/endianness-setup site.
	 */
	free_page((unsigned long)vcpu->arch.shared & PAGE_MASK);
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
	kfree(vcpu->arch.shadow_vcpu);
#endif
	vfree(vcpu_book3s);
}
---|
1807 | 1826 | |
---|
1808 | | -static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) |
---|
| 1827 | +static int kvmppc_vcpu_run_pr(struct kvm_vcpu *vcpu) |
---|
1809 | 1828 | { |
---|
1810 | 1829 | int ret; |
---|
1811 | | -#ifdef CONFIG_ALTIVEC |
---|
1812 | | - unsigned long uninitialized_var(vrsave); |
---|
1813 | | -#endif |
---|
1814 | 1830 | |
---|
1815 | 1831 | /* Check if we can run the vcpu at all */ |
---|
1816 | 1832 | if (!vcpu->arch.sane) { |
---|
1817 | | - kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR; |
---|
| 1833 | + vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; |
---|
1818 | 1834 | ret = -EINVAL; |
---|
1819 | 1835 | goto out; |
---|
1820 | 1836 | } |
---|
.. | .. |
---|
1841 | 1857 | |
---|
1842 | 1858 | kvmppc_fix_ee_before_entry(); |
---|
1843 | 1859 | |
---|
1844 | | - ret = __kvmppc_vcpu_run(kvm_run, vcpu); |
---|
| 1860 | + ret = __kvmppc_vcpu_run(vcpu); |
---|
1845 | 1861 | |
---|
1846 | 1862 | kvmppc_clear_debug(vcpu); |
---|
1847 | 1863 | |
---|
.. | .. |
---|
1865 | 1881 | static int kvm_vm_ioctl_get_dirty_log_pr(struct kvm *kvm, |
---|
1866 | 1882 | struct kvm_dirty_log *log) |
---|
1867 | 1883 | { |
---|
1868 | | - struct kvm_memslots *slots; |
---|
1869 | 1884 | struct kvm_memory_slot *memslot; |
---|
1870 | 1885 | struct kvm_vcpu *vcpu; |
---|
1871 | 1886 | ulong ga, ga_end; |
---|
.. | .. |
---|
1875 | 1890 | |
---|
1876 | 1891 | mutex_lock(&kvm->slots_lock); |
---|
1877 | 1892 | |
---|
1878 | | - r = kvm_get_dirty_log(kvm, log, &is_dirty); |
---|
| 1893 | + r = kvm_get_dirty_log(kvm, log, &is_dirty, &memslot); |
---|
1879 | 1894 | if (r) |
---|
1880 | 1895 | goto out; |
---|
1881 | 1896 | |
---|
1882 | 1897 | /* If nothing is dirty, don't bother messing with page tables. */ |
---|
1883 | 1898 | if (is_dirty) { |
---|
1884 | | - slots = kvm_memslots(kvm); |
---|
1885 | | - memslot = id_to_memslot(slots, log->slot); |
---|
1886 | | - |
---|
1887 | 1899 | ga = memslot->base_gfn << PAGE_SHIFT; |
---|
1888 | 1900 | ga_end = ga + (memslot->npages << PAGE_SHIFT); |
---|
1889 | 1901 | |
---|
.. | .. |
---|
1908 | 1920 | |
---|
/*
 * PR-mode KVM needs no preparation work when a memslot is created,
 * moved, or changed; always report success.
 */
static int kvmppc_core_prepare_memory_region_pr(struct kvm *kvm,
				struct kvm_memory_slot *memslot,
				const struct kvm_userspace_memory_region *mem,
				enum kvm_mr_change change)
{
	return 0;
}
---|
.. | .. |
---|
/*
 * PR-mode KVM has no commit-time work for memslot updates; this hook
 * is intentionally a no-op.
 */
static void kvmppc_core_commit_memory_region_pr(struct kvm *kvm,
				const struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old,
				const struct kvm_memory_slot *new,
				enum kvm_mr_change change)
{
	return;
}
---|
1923 | 1937 | |
---|
/*
 * PR-mode KVM keeps no per-memslot private data, so there is nothing
 * to free here; intentionally a no-op.
 */
static void kvmppc_core_free_memslot_pr(struct kvm_memory_slot *slot)
{
	return;
}
---|
1929 | | - |
---|
1930 | | -static int kvmppc_core_create_memslot_pr(struct kvm_memory_slot *slot, |
---|
1931 | | - unsigned long npages) |
---|
1932 | | -{ |
---|
1933 | | - return 0; |
---|
1934 | | -} |
---|
1935 | | - |
---|
1936 | 1942 | |
---|
1937 | 1943 | #ifdef CONFIG_PPC64 |
---|
1938 | 1944 | static int kvm_vm_ioctl_get_smmu_info_pr(struct kvm *kvm, |
---|
.. | .. |
---|
1997 | 2003 | { |
---|
1998 | 2004 | /* We should not get called */ |
---|
1999 | 2005 | BUG(); |
---|
| 2006 | + return 0; |
---|
2000 | 2007 | } |
---|
2001 | 2008 | #endif /* CONFIG_PPC64 */ |
---|
2002 | 2009 | |
---|
.. | .. |
---|
2062 | 2069 | .set_one_reg = kvmppc_set_one_reg_pr, |
---|
2063 | 2070 | .vcpu_load = kvmppc_core_vcpu_load_pr, |
---|
2064 | 2071 | .vcpu_put = kvmppc_core_vcpu_put_pr, |
---|
| 2072 | + .inject_interrupt = kvmppc_inject_interrupt_pr, |
---|
2065 | 2073 | .set_msr = kvmppc_set_msr_pr, |
---|
2066 | 2074 | .vcpu_run = kvmppc_vcpu_run_pr, |
---|
2067 | 2075 | .vcpu_create = kvmppc_core_vcpu_create_pr, |
---|
.. | .. |
---|
2075 | 2083 | .age_hva = kvm_age_hva_pr, |
---|
2076 | 2084 | .test_age_hva = kvm_test_age_hva_pr, |
---|
2077 | 2085 | .set_spte_hva = kvm_set_spte_hva_pr, |
---|
2078 | | - .mmu_destroy = kvmppc_mmu_destroy_pr, |
---|
2079 | 2086 | .free_memslot = kvmppc_core_free_memslot_pr, |
---|
2080 | | - .create_memslot = kvmppc_core_create_memslot_pr, |
---|
2081 | 2087 | .init_vm = kvmppc_core_init_vm_pr, |
---|
2082 | 2088 | .destroy_vm = kvmppc_core_destroy_vm_pr, |
---|
2083 | 2089 | .get_smmu_info = kvm_vm_ioctl_get_smmu_info_pr, |
---|