| Old | New | Code |
| .. | .. | .. |
| | 1 | +// SPDX-License-Identifier: GPL-2.0-only |
| 1 | 2 | /* |
| 2 | | - * This program is free software; you can redistribute it and/or modify |
| 3 | | - * it under the terms of the GNU General Public License, version 2, as |
| 4 | | - * published by the Free Software Foundation. |
| 5 | | - * |
| 6 | | - * This program is distributed in the hope that it will be useful, |
| 7 | | - * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| 8 | | - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| 9 | | - * GNU General Public License for more details. |
| 10 | | - * |
| 11 | | - * You should have received a copy of the GNU General Public License |
| 12 | | - * along with this program; if not, write to the Free Software |
| 13 | | - * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. |
| 14 | 3 | * |
| 15 | 4 | * Copyright IBM Corp. 2007 |
| 16 | 5 | * Copyright 2010-2011 Freescale Semiconductor, Inc. |
| .. | .. | .. |
| 46 | 35 | |
| 47 | 36 | unsigned long kvmppc_booke_handlers; |
| 48 | 37 | |
| 49 | | -#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM |
| 50 | | -#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU |
| 51 | | - |
| 52 | 38 | struct kvm_stats_debugfs_item debugfs_entries[] = { |
| 53 | | - { "mmio", VCPU_STAT(mmio_exits) }, |
| 54 | | - { "sig", VCPU_STAT(signal_exits) }, |
| 55 | | - { "itlb_r", VCPU_STAT(itlb_real_miss_exits) }, |
| 56 | | - { "itlb_v", VCPU_STAT(itlb_virt_miss_exits) }, |
| 57 | | - { "dtlb_r", VCPU_STAT(dtlb_real_miss_exits) }, |
| 58 | | - { "dtlb_v", VCPU_STAT(dtlb_virt_miss_exits) }, |
| 59 | | - { "sysc", VCPU_STAT(syscall_exits) }, |
| 60 | | - { "isi", VCPU_STAT(isi_exits) }, |
| 61 | | - { "dsi", VCPU_STAT(dsi_exits) }, |
| 62 | | - { "inst_emu", VCPU_STAT(emulated_inst_exits) }, |
| 63 | | - { "dec", VCPU_STAT(dec_exits) }, |
| 64 | | - { "ext_intr", VCPU_STAT(ext_intr_exits) }, |
| 65 | | - { "halt_successful_poll", VCPU_STAT(halt_successful_poll) }, |
| 66 | | - { "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) }, |
| 67 | | - { "halt_poll_invalid", VCPU_STAT(halt_poll_invalid) }, |
| 68 | | - { "halt_wakeup", VCPU_STAT(halt_wakeup) }, |
| 69 | | - { "doorbell", VCPU_STAT(dbell_exits) }, |
| 70 | | - { "guest doorbell", VCPU_STAT(gdbell_exits) }, |
| 71 | | - { "remote_tlb_flush", VM_STAT(remote_tlb_flush) }, |
| | 39 | + VCPU_STAT("mmio", mmio_exits), |
| | 40 | + VCPU_STAT("sig", signal_exits), |
| | 41 | + VCPU_STAT("itlb_r", itlb_real_miss_exits), |
| | 42 | + VCPU_STAT("itlb_v", itlb_virt_miss_exits), |
| | 43 | + VCPU_STAT("dtlb_r", dtlb_real_miss_exits), |
| | 44 | + VCPU_STAT("dtlb_v", dtlb_virt_miss_exits), |
| | 45 | + VCPU_STAT("sysc", syscall_exits), |
| | 46 | + VCPU_STAT("isi", isi_exits), |
| | 47 | + VCPU_STAT("dsi", dsi_exits), |
| | 48 | + VCPU_STAT("inst_emu", emulated_inst_exits), |
| | 49 | + VCPU_STAT("dec", dec_exits), |
| | 50 | + VCPU_STAT("ext_intr", ext_intr_exits), |
| | 51 | + VCPU_STAT("halt_successful_poll", halt_successful_poll), |
| | 52 | + VCPU_STAT("halt_attempted_poll", halt_attempted_poll), |
| | 53 | + VCPU_STAT("halt_poll_invalid", halt_poll_invalid), |
| | 54 | + VCPU_STAT("halt_wakeup", halt_wakeup), |
| | 55 | + VCPU_STAT("doorbell", dbell_exits), |
| | 56 | + VCPU_STAT("guest doorbell", gdbell_exits), |
| | 57 | + VCPU_STAT("halt_poll_success_ns", halt_poll_success_ns), |
| | 58 | + VCPU_STAT("halt_poll_fail_ns", halt_poll_fail_ns), |
| | 59 | + VM_STAT("remote_tlb_flush", remote_tlb_flush), |
| 72 | 60 | { NULL } |
| 73 | 61 | }; |
| 74 | 62 | |
| .. | .. | .. |
| 432 | 420 | case BOOKE_IRQPRIO_DATA_STORAGE: |
| 433 | 421 | case BOOKE_IRQPRIO_ALIGNMENT: |
| 434 | 422 | update_dear = true; |
| 435 | | - /* fall through */ |
| | 423 | + fallthrough; |
| 436 | 424 | case BOOKE_IRQPRIO_INST_STORAGE: |
| 437 | 425 | case BOOKE_IRQPRIO_PROGRAM: |
| 438 | 426 | update_esr = true; |
| 439 | | - /* fall through */ |
| | 427 | + fallthrough; |
| 440 | 428 | case BOOKE_IRQPRIO_ITLB_MISS: |
| 441 | 429 | case BOOKE_IRQPRIO_SYSCALL: |
| 442 | 430 | case BOOKE_IRQPRIO_FP_UNAVAIL: |
| .. | .. | .. |
| 470 | 458 | case BOOKE_IRQPRIO_DECREMENTER: |
| 471 | 459 | case BOOKE_IRQPRIO_FIT: |
| 472 | 460 | keep_irq = true; |
| 473 | | - /* fall through */ |
| | 461 | + fallthrough; |
| 474 | 462 | case BOOKE_IRQPRIO_EXTERNAL: |
| 475 | 463 | case BOOKE_IRQPRIO_DBELL: |
| 476 | 464 | allowed = vcpu->arch.shared->msr & MSR_EE; |
| .. | .. | .. |
| 741 | 729 | return r; |
| 742 | 730 | } |
| 743 | 731 | |
| 744 | | -int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) |
| | 732 | +int kvmppc_vcpu_run(struct kvm_vcpu *vcpu) |
| 745 | 733 | { |
| 746 | 734 | int ret, s; |
| 747 | 735 | struct debug_reg debug; |
| 748 | 736 | |
| 749 | 737 | if (!vcpu->arch.sane) { |
| 750 | | - kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR; |
| | 738 | + vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; |
| 751 | 739 | return -EINVAL; |
| 752 | 740 | } |
| 753 | 741 | |
| .. | .. | .. |
| 786 | 774 | debug = current->thread.debug; |
| 787 | 775 | current->thread.debug = vcpu->arch.dbg_reg; |
| 788 | 776 | |
| 789 | | - vcpu->arch.pgdir = current->mm->pgd; |
| | 777 | + vcpu->arch.pgdir = vcpu->kvm->mm->pgd; |
| 790 | 778 | kvmppc_fix_ee_before_entry(); |
| 791 | 779 | |
| 792 | | - ret = __kvmppc_vcpu_run(kvm_run, vcpu); |
| | 780 | + ret = __kvmppc_vcpu_run(vcpu); |
| 793 | 781 | |
| 794 | 782 | /* No need for guest_exit. It's done in handle_exit. |
| 795 | 783 | We also get here with interrupts enabled. */ |
| .. | .. | .. |
| 811 | 799 | return ret; |
| 812 | 800 | } |
| 813 | 801 | |
| 814 | | -static int emulation_exit(struct kvm_run *run, struct kvm_vcpu *vcpu) |
| | 802 | +static int emulation_exit(struct kvm_vcpu *vcpu) |
| 815 | 803 | { |
| 816 | 804 | enum emulation_result er; |
| 817 | 805 | |
| 818 | | - er = kvmppc_emulate_instruction(run, vcpu); |
| | 806 | + er = kvmppc_emulate_instruction(vcpu); |
| 819 | 807 | switch (er) { |
| 820 | 808 | case EMULATE_DONE: |
| 821 | 809 | /* don't overwrite subtypes, just account kvm_stats */ |
| .. | .. | .. |
| 832 | 820 | __func__, vcpu->arch.regs.nip, vcpu->arch.last_inst); |
| 833 | 821 | /* For debugging, encode the failing instruction and |
| 834 | 822 | * report it to userspace. */ |
| 835 | | - run->hw.hardware_exit_reason = ~0ULL << 32; |
| 836 | | - run->hw.hardware_exit_reason |= vcpu->arch.last_inst; |
| | 823 | + vcpu->run->hw.hardware_exit_reason = ~0ULL << 32; |
| | 824 | + vcpu->run->hw.hardware_exit_reason |= vcpu->arch.last_inst; |
| 837 | 825 | kvmppc_core_queue_program(vcpu, ESR_PIL); |
| 838 | 826 | return RESUME_HOST; |
| 839 | 827 | |
| .. | .. | .. |
| 845 | 833 | } |
| 846 | 834 | } |
| 847 | 835 | |
| 848 | | -static int kvmppc_handle_debug(struct kvm_run *run, struct kvm_vcpu *vcpu) |
| | 836 | +static int kvmppc_handle_debug(struct kvm_vcpu *vcpu) |
| 849 | 837 | { |
| | 838 | + struct kvm_run *run = vcpu->run; |
| 850 | 839 | struct debug_reg *dbg_reg = &(vcpu->arch.dbg_reg); |
| 851 | 840 | u32 dbsr = vcpu->arch.dbsr; |
| 852 | 841 | |
| .. | .. | .. |
| 965 | 954 | } |
| 966 | 955 | } |
| 967 | 956 | |
| 968 | | -static int kvmppc_resume_inst_load(struct kvm_run *run, struct kvm_vcpu *vcpu, |
| | 957 | +static int kvmppc_resume_inst_load(struct kvm_vcpu *vcpu, |
| 969 | 958 | enum emulation_result emulated, u32 last_inst) |
| 970 | 959 | { |
| 971 | 960 | switch (emulated) { |
| .. | .. | .. |
| 977 | 966 | __func__, vcpu->arch.regs.nip); |
| 978 | 967 | /* For debugging, encode the failing instruction and |
| 979 | 968 | * report it to userspace. */ |
| 980 | | - run->hw.hardware_exit_reason = ~0ULL << 32; |
| 981 | | - run->hw.hardware_exit_reason |= last_inst; |
| | 969 | + vcpu->run->hw.hardware_exit_reason = ~0ULL << 32; |
| | 970 | + vcpu->run->hw.hardware_exit_reason |= last_inst; |
| 982 | 971 | kvmppc_core_queue_program(vcpu, ESR_PIL); |
| 983 | 972 | return RESUME_HOST; |
| 984 | 973 | |
| .. | .. | .. |
| 992 | 981 | * |
| 993 | 982 | * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV) |
| 994 | 983 | */ |
| 995 | | -int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, |
| 996 | | - unsigned int exit_nr) |
| | 984 | +int kvmppc_handle_exit(struct kvm_vcpu *vcpu, unsigned int exit_nr) |
| 997 | 985 | { |
| | 986 | + struct kvm_run *run = vcpu->run; |
| 998 | 987 | int r = RESUME_HOST; |
| 999 | 988 | int s; |
| 1000 | 989 | int idx; |
| .. | .. | .. |
| 1027 | 1016 | } |
| 1028 | 1017 | |
| 1029 | 1018 | trace_kvm_exit(exit_nr, vcpu); |
| 1030 | | - guest_exit_irqoff(); |
| | 1019 | + |
| | 1020 | + context_tracking_guest_exit(); |
| | 1021 | + if (!vtime_accounting_enabled_this_cpu()) { |
| | 1022 | + local_irq_enable(); |
| | 1023 | + /* |
| | 1024 | + * Service IRQs here before vtime_account_guest_exit() so any |
| | 1025 | + * ticks that occurred while running the guest are accounted to |
| | 1026 | + * the guest. If vtime accounting is enabled, accounting uses |
| | 1027 | + * TB rather than ticks, so it can be done without enabling |
| | 1028 | + * interrupts here, which has the problem that it accounts |
| | 1029 | + * interrupt processing overhead to the host. |
| | 1030 | + */ |
| | 1031 | + local_irq_disable(); |
| | 1032 | + } |
| | 1033 | + vtime_account_guest_exit(); |
| 1031 | 1034 | |
| 1032 | 1035 | local_irq_enable(); |
| 1033 | 1036 | |
| .. | .. | .. |
| 1035 | 1038 | run->ready_for_interrupt_injection = 1; |
| 1036 | 1039 | |
| 1037 | 1040 | if (emulated != EMULATE_DONE) { |
| 1038 | | - r = kvmppc_resume_inst_load(run, vcpu, emulated, last_inst); |
| | 1041 | + r = kvmppc_resume_inst_load(vcpu, emulated, last_inst); |
| 1039 | 1042 | goto out; |
| 1040 | 1043 | } |
| 1041 | 1044 | |
| .. | .. | .. |
| 1095 | 1098 | break; |
| 1096 | 1099 | |
| 1097 | 1100 | case BOOKE_INTERRUPT_HV_PRIV: |
| 1098 | | - r = emulation_exit(run, vcpu); |
| | 1101 | + r = emulation_exit(vcpu); |
| 1099 | 1102 | break; |
| 1100 | 1103 | |
| 1101 | 1104 | case BOOKE_INTERRUPT_PROGRAM: |
| .. | .. | .. |
| 1105 | 1108 | * We are here because of an SW breakpoint instr, |
| 1106 | 1109 | * so lets return to host to handle. |
| 1107 | 1110 | */ |
| 1108 | | - r = kvmppc_handle_debug(run, vcpu); |
| | 1111 | + r = kvmppc_handle_debug(vcpu); |
| 1109 | 1112 | run->exit_reason = KVM_EXIT_DEBUG; |
| 1110 | 1113 | kvmppc_account_exit(vcpu, DEBUG_EXITS); |
| 1111 | 1114 | break; |
| .. | .. | .. |
| 1126 | 1129 | break; |
| 1127 | 1130 | } |
| 1128 | 1131 | |
| 1129 | | - r = emulation_exit(run, vcpu); |
| | 1132 | + r = emulation_exit(vcpu); |
| 1130 | 1133 | break; |
| 1131 | 1134 | |
| 1132 | 1135 | case BOOKE_INTERRUPT_FP_UNAVAIL: |
| .. | .. | .. |
| 1293 | 1296 | * actually RAM. */ |
| 1294 | 1297 | vcpu->arch.paddr_accessed = gpaddr; |
| 1295 | 1298 | vcpu->arch.vaddr_accessed = eaddr; |
| 1296 | | - r = kvmppc_emulate_mmio(run, vcpu); |
| | 1299 | + r = kvmppc_emulate_mmio(vcpu); |
| 1297 | 1300 | kvmppc_account_exit(vcpu, MMIO_EXITS); |
| 1298 | 1301 | } |
| 1299 | 1302 | |
| .. | .. | .. |
| 1344 | 1347 | } |
| 1345 | 1348 | |
| 1346 | 1349 | case BOOKE_INTERRUPT_DEBUG: { |
| 1347 | | - r = kvmppc_handle_debug(run, vcpu); |
| | 1350 | + r = kvmppc_handle_debug(vcpu); |
| 1348 | 1351 | if (r == RESUME_HOST) |
| 1349 | 1352 | run->exit_reason = KVM_EXIT_DEBUG; |
| 1350 | 1353 | kvmppc_account_exit(vcpu, DEBUG_EXITS); |
| .. | .. | .. |
| 1386 | 1389 | arm_next_watchdog(vcpu); |
| 1387 | 1390 | |
| 1388 | 1391 | update_timer_ints(vcpu); |
| 1389 | | -} |
| 1390 | | - |
| 1391 | | -/* Initial guest state: 16MB mapping 0 -> 0, PC = 0, MSR = 0, R1 = 16MB */ |
| 1392 | | -int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) |
| 1393 | | -{ |
| 1394 | | - int i; |
| 1395 | | - int r; |
| 1396 | | - |
| 1397 | | - vcpu->arch.regs.nip = 0; |
| 1398 | | - vcpu->arch.shared->pir = vcpu->vcpu_id; |
| 1399 | | - kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */ |
| 1400 | | - kvmppc_set_msr(vcpu, 0); |
| 1401 | | - |
| 1402 | | -#ifndef CONFIG_KVM_BOOKE_HV |
| 1403 | | - vcpu->arch.shadow_msr = MSR_USER | MSR_IS | MSR_DS; |
| 1404 | | - vcpu->arch.shadow_pid = 1; |
| 1405 | | - vcpu->arch.shared->msr = 0; |
| 1406 | | -#endif |
| 1407 | | - |
| 1408 | | - /* Eye-catching numbers so we know if the guest takes an interrupt |
| 1409 | | - * before it's programmed its own IVPR/IVORs. */ |
| 1410 | | - vcpu->arch.ivpr = 0x55550000; |
| 1411 | | - for (i = 0; i < BOOKE_IRQPRIO_MAX; i++) |
| 1412 | | - vcpu->arch.ivor[i] = 0x7700 | i * 4; |
| 1413 | | - |
| 1414 | | - kvmppc_init_timing_stats(vcpu); |
| 1415 | | - |
| 1416 | | - r = kvmppc_core_vcpu_setup(vcpu); |
| 1417 | | - kvmppc_sanity_check(vcpu); |
| 1418 | | - return r; |
| 1419 | 1392 | } |
| 1420 | 1393 | |
| 1421 | 1394 | int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu) |
| .. | .. | .. |
| 1788 | 1761 | |
| 1789 | 1762 | int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) |
| 1790 | 1763 | { |
| 1791 | | - return -ENOTSUPP; |
| | 1764 | + return -EOPNOTSUPP; |
| 1792 | 1765 | } |
| 1793 | 1766 | |
| 1794 | 1767 | int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) |
| 1795 | 1768 | { |
| 1796 | | - return -ENOTSUPP; |
| | 1769 | + return -EOPNOTSUPP; |
| 1797 | 1770 | } |
| 1798 | 1771 | |
| 1799 | 1772 | int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, |
| .. | .. | .. |
| 1807 | 1780 | return r; |
| 1808 | 1781 | } |
| 1809 | 1782 | |
| | 1783 | +void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot) |
| | 1784 | +{ |
| | 1785 | + |
| | 1786 | +} |
| | 1787 | + |
| 1810 | 1788 | int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log) |
| 1811 | 1789 | { |
| 1812 | | - return -ENOTSUPP; |
| | 1790 | + return -EOPNOTSUPP; |
| 1813 | 1791 | } |
| 1814 | 1792 | |
| 1815 | | -void kvmppc_core_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free, |
| 1816 | | - struct kvm_memory_slot *dont) |
| | 1793 | +void kvmppc_core_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot) |
| 1817 | 1794 | { |
| 1818 | | -} |
| 1819 | | - |
| 1820 | | -int kvmppc_core_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot, |
| 1821 | | - unsigned long npages) |
| 1822 | | -{ |
| 1823 | | - return 0; |
| 1824 | 1795 | } |
| 1825 | 1796 | |
| 1826 | 1797 | int kvmppc_core_prepare_memory_region(struct kvm *kvm, |
| 1827 | 1798 | struct kvm_memory_slot *memslot, |
| 1828 | | - const struct kvm_userspace_memory_region *mem) |
| | 1799 | + const struct kvm_userspace_memory_region *mem, |
| | 1800 | + enum kvm_mr_change change) |
| 1829 | 1801 | { |
| 1830 | 1802 | return 0; |
| 1831 | 1803 | } |
| .. | .. | .. |
| 1833 | 1805 | void kvmppc_core_commit_memory_region(struct kvm *kvm, |
| 1834 | 1806 | const struct kvm_userspace_memory_region *mem, |
| 1835 | 1807 | const struct kvm_memory_slot *old, |
| 1836 | | - const struct kvm_memory_slot *new) |
| | 1808 | + const struct kvm_memory_slot *new, |
| | 1809 | + enum kvm_mr_change change) |
| 1837 | 1810 | { |
| 1838 | 1811 | } |
| 1839 | 1812 | |
| .. | .. | .. |
| 2114 | 2087 | kvmppc_clear_dbsr(); |
| 2115 | 2088 | } |
| 2116 | 2089 | |
| 2117 | | -void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu) |
| 2118 | | -{ |
| 2119 | | - vcpu->kvm->arch.kvm_ops->mmu_destroy(vcpu); |
| 2120 | | -} |
| 2121 | | - |
| 2122 | 2090 | int kvmppc_core_init_vm(struct kvm *kvm) |
| 2123 | 2091 | { |
| 2124 | 2092 | return kvm->arch.kvm_ops->init_vm(kvm); |
| 2125 | 2093 | } |
| 2126 | 2094 | |
| 2127 | | -struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id) |
| | 2095 | +int kvmppc_core_vcpu_create(struct kvm_vcpu *vcpu) |
| 2128 | 2096 | { |
| 2129 | | - return kvm->arch.kvm_ops->vcpu_create(kvm, id); |
| | 2097 | + int i; |
| | 2098 | + int r; |
| | 2099 | + |
| | 2100 | + r = vcpu->kvm->arch.kvm_ops->vcpu_create(vcpu); |
| | 2101 | + if (r) |
| | 2102 | + return r; |
| | 2103 | + |
| | 2104 | + /* Initial guest state: 16MB mapping 0 -> 0, PC = 0, MSR = 0, R1 = 16MB */ |
| | 2105 | + vcpu->arch.regs.nip = 0; |
| | 2106 | + vcpu->arch.shared->pir = vcpu->vcpu_id; |
| | 2107 | + kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */ |
| | 2108 | + kvmppc_set_msr(vcpu, 0); |
| | 2109 | + |
| | 2110 | +#ifndef CONFIG_KVM_BOOKE_HV |
| | 2111 | + vcpu->arch.shadow_msr = MSR_USER | MSR_IS | MSR_DS; |
| | 2112 | + vcpu->arch.shadow_pid = 1; |
| | 2113 | + vcpu->arch.shared->msr = 0; |
| | 2114 | +#endif |
| | 2115 | + |
| | 2116 | + /* Eye-catching numbers so we know if the guest takes an interrupt |
| | 2117 | + * before it's programmed its own IVPR/IVORs. */ |
| | 2118 | + vcpu->arch.ivpr = 0x55550000; |
| | 2119 | + for (i = 0; i < BOOKE_IRQPRIO_MAX; i++) |
| | 2120 | + vcpu->arch.ivor[i] = 0x7700 | i * 4; |
| | 2121 | + |
| | 2122 | + kvmppc_init_timing_stats(vcpu); |
| | 2123 | + |
| | 2124 | + r = kvmppc_core_vcpu_setup(vcpu); |
| | 2125 | + if (r) |
| | 2126 | + vcpu->kvm->arch.kvm_ops->vcpu_free(vcpu); |
| | 2127 | + kvmppc_sanity_check(vcpu); |
| | 2128 | + return r; |
| 2130 | 2129 | } |
| 2131 | 2130 | |
| 2132 | 2131 | void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu) |