```diff
@@ -1,16 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2, as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
  *
  * Copyright IBM Corp. 2008
  *
@@ -36,6 +25,8 @@
 #endif
 #ifdef CONFIG_KVM_BOOK3S_64_HANDLER
 #include <asm/paca.h>
+#include <asm/xive.h>
+#include <asm/cpu_has_feature.h>
 #endif
 
 /*
@@ -67,28 +58,28 @@
 	XLATE_WRITE		/* check for write permissions */
 };
 
-extern int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
-extern int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
+extern int kvmppc_vcpu_run(struct kvm_vcpu *vcpu);
+extern int __kvmppc_vcpu_run(struct kvm_vcpu *vcpu);
 extern void kvmppc_handler_highmem(void);
 
 extern void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu);
-extern int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
+extern int kvmppc_handle_load(struct kvm_vcpu *vcpu,
			      unsigned int rt, unsigned int bytes,
			      int is_default_endian);
-extern int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
+extern int kvmppc_handle_loads(struct kvm_vcpu *vcpu,
			       unsigned int rt, unsigned int bytes,
			       int is_default_endian);
-extern int kvmppc_handle_vsx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
+extern int kvmppc_handle_vsx_load(struct kvm_vcpu *vcpu,
			unsigned int rt, unsigned int bytes,
			int is_default_endian, int mmio_sign_extend);
-extern int kvmppc_handle_vmx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
+extern int kvmppc_handle_vmx_load(struct kvm_vcpu *vcpu,
		unsigned int rt, unsigned int bytes, int is_default_endian);
-extern int kvmppc_handle_vmx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
+extern int kvmppc_handle_vmx_store(struct kvm_vcpu *vcpu,
		unsigned int rs, unsigned int bytes, int is_default_endian);
-extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
+extern int kvmppc_handle_store(struct kvm_vcpu *vcpu,
			       u64 val, unsigned int bytes,
			       int is_default_endian);
-extern int kvmppc_handle_vsx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
+extern int kvmppc_handle_vsx_store(struct kvm_vcpu *vcpu,
			int rs, unsigned int bytes,
			int is_default_endian);
 
@@ -99,10 +90,9 @@
		     bool data);
 extern int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
		     bool data);
-extern int kvmppc_emulate_instruction(struct kvm_run *run,
-				      struct kvm_vcpu *vcpu);
+extern int kvmppc_emulate_instruction(struct kvm_vcpu *vcpu);
 extern int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu);
-extern int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu);
+extern int kvmppc_emulate_mmio(struct kvm_vcpu *vcpu);
 extern void kvmppc_emulate_dec(struct kvm_vcpu *vcpu);
 extern u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb);
 extern void kvmppc_decrementer_func(struct kvm_vcpu *vcpu);
@@ -116,8 +106,6 @@
				   unsigned int gtlb_idx);
 extern void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode);
 extern void kvmppc_mmu_switch_pid(struct kvm_vcpu *vcpu, u32 pid);
-extern void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu);
-extern int kvmppc_mmu_init(struct kvm_vcpu *vcpu);
 extern int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
 extern int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
 extern gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int gtlb_index,
@@ -128,8 +116,7 @@
			enum xlate_instdata xlid, enum xlate_readwrite xlrw,
			struct kvmppc_pte *pte);
 
-extern struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm,
-						unsigned int id);
+extern int kvmppc_core_vcpu_create(struct kvm_vcpu *vcpu);
 extern void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu);
 extern int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu);
 extern int kvmppc_core_check_processor_compat(void);
@@ -141,6 +128,7 @@
 
 extern int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu);
 extern int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu);
+extern void kvmppc_core_queue_machine_check(struct kvm_vcpu *vcpu, ulong flags);
 extern void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags);
 extern void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu);
 extern void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu);
@@ -194,12 +182,6 @@
	(iommu_tce_check_ioba((stt)->page_shift, (stt)->offset, \
			(stt)->size, (ioba), (npages)) ? \
			H_PARAMETER : H_SUCCESS)
-extern long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *tt,
-		unsigned long tce);
-extern long kvmppc_gpa_to_ua(struct kvm *kvm, unsigned long gpa,
-		unsigned long *ua, unsigned long **prmap);
-extern void kvmppc_tce_put(struct kvmppc_spapr_tce_table *tt,
-		unsigned long idx, unsigned long tce);
 extern long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
			     unsigned long ioba, unsigned long tce);
 extern long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
@@ -215,18 +197,16 @@
 extern int kvmppc_core_init_vm(struct kvm *kvm);
 extern void kvmppc_core_destroy_vm(struct kvm *kvm);
 extern void kvmppc_core_free_memslot(struct kvm *kvm,
-				     struct kvm_memory_slot *free,
-				     struct kvm_memory_slot *dont);
-extern int kvmppc_core_create_memslot(struct kvm *kvm,
-				      struct kvm_memory_slot *slot,
-				      unsigned long npages);
+				     struct kvm_memory_slot *slot);
 extern int kvmppc_core_prepare_memory_region(struct kvm *kvm,
				struct kvm_memory_slot *memslot,
-				const struct kvm_userspace_memory_region *mem);
+				const struct kvm_userspace_memory_region *mem,
+				enum kvm_mr_change change);
 extern void kvmppc_core_commit_memory_region(struct kvm *kvm,
				const struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old,
-				const struct kvm_memory_slot *new);
+				const struct kvm_memory_slot *new,
+				enum kvm_mr_change change);
 extern int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm,
				      struct kvm_ppc_smmu_info *info);
 extern void kvmppc_core_flush_memslot(struct kvm *kvm,
@@ -271,6 +251,7 @@
		u64	addr;
		u64	length;
	}	vpaval;
+	u64	xive_timaval[2];
 };
 
 struct kvmppc_ops {
@@ -283,34 +264,33 @@
			   union kvmppc_one_reg *val);
	void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
	void (*vcpu_put)(struct kvm_vcpu *vcpu);
+	void (*inject_interrupt)(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags);
	void (*set_msr)(struct kvm_vcpu *vcpu, u64 msr);
-	int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
-	struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned int id);
+	int (*vcpu_run)(struct kvm_vcpu *vcpu);
+	int (*vcpu_create)(struct kvm_vcpu *vcpu);
	void (*vcpu_free)(struct kvm_vcpu *vcpu);
	int (*check_requests)(struct kvm_vcpu *vcpu);
	int (*get_dirty_log)(struct kvm *kvm, struct kvm_dirty_log *log);
	void (*flush_memslot)(struct kvm *kvm, struct kvm_memory_slot *memslot);
	int (*prepare_memory_region)(struct kvm *kvm,
				     struct kvm_memory_slot *memslot,
-				     const struct kvm_userspace_memory_region *mem);
+				     const struct kvm_userspace_memory_region *mem,
+				     enum kvm_mr_change change);
	void (*commit_memory_region)(struct kvm *kvm,
				const struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old,
-				const struct kvm_memory_slot *new);
+				const struct kvm_memory_slot *new,
+				enum kvm_mr_change change);
	int (*unmap_hva_range)(struct kvm *kvm, unsigned long start,
			   unsigned long end);
	int (*age_hva)(struct kvm *kvm, unsigned long start, unsigned long end);
	int (*test_age_hva)(struct kvm *kvm, unsigned long hva);
	void (*set_spte_hva)(struct kvm *kvm, unsigned long hva, pte_t pte);
-	void (*mmu_destroy)(struct kvm_vcpu *vcpu);
-	void (*free_memslot)(struct kvm_memory_slot *free,
-			     struct kvm_memory_slot *dont);
-	int (*create_memslot)(struct kvm_memory_slot *slot,
-			      unsigned long npages);
+	void (*free_memslot)(struct kvm_memory_slot *slot);
	int (*init_vm)(struct kvm *kvm);
	void (*destroy_vm)(struct kvm *kvm);
	int (*get_smmu_info)(struct kvm *kvm, struct kvm_ppc_smmu_info *info);
-	int (*emulate_op)(struct kvm_run *run, struct kvm_vcpu *vcpu,
+	int (*emulate_op)(struct kvm_vcpu *vcpu,
			  unsigned int inst, int *advance);
	int (*emulate_mtspr)(struct kvm_vcpu *vcpu, int sprn, ulong spr_val);
	int (*emulate_mfspr)(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val);
@@ -327,6 +307,13 @@
	int (*set_smt_mode)(struct kvm *kvm, unsigned long mode,
			    unsigned long flags);
	void (*giveup_ext)(struct kvm_vcpu *vcpu, ulong msr);
+	int (*enable_nested)(struct kvm *kvm);
+	int (*load_from_eaddr)(struct kvm_vcpu *vcpu, ulong *eaddr, void *ptr,
+			       int size);
+	int (*store_to_eaddr)(struct kvm_vcpu *vcpu, ulong *eaddr, void *ptr,
+			      int size);
+	int (*enable_svm)(struct kvm *kvm);
+	int (*svm_off)(struct kvm *kvm);
 };
 
 extern struct kvmppc_ops *kvmppc_hv_ops;
@@ -563,6 +550,9 @@
 extern void kvm_hv_vm_deactivated(void);
 extern bool kvm_hv_mode_active(void);
 
+extern void kvmppc_check_need_tlb_flush(struct kvm *kvm, int pcpu,
+					struct kvm_nested_guest *nested);
+
 #else
 static inline void __init kvm_cma_reserve(void)
 {}
@@ -679,6 +669,24 @@
 
 extern int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
			       int level, bool line_status);
+extern void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu);
+
+static inline int kvmppc_xive_enabled(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.irq_type == KVMPPC_IRQ_XIVE;
+}
+
+extern int kvmppc_xive_native_connect_vcpu(struct kvm_device *dev,
+					   struct kvm_vcpu *vcpu, u32 cpu);
+extern void kvmppc_xive_native_cleanup_vcpu(struct kvm_vcpu *vcpu);
+extern void kvmppc_xive_native_init_module(void);
+extern void kvmppc_xive_native_exit_module(void);
+extern int kvmppc_xive_native_get_vp(struct kvm_vcpu *vcpu,
+				     union kvmppc_one_reg *val);
+extern int kvmppc_xive_native_set_vp(struct kvm_vcpu *vcpu,
+				     union kvmppc_one_reg *val);
+extern bool kvmppc_xive_native_supported(void);
+
 #else
 static inline int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server,
				       u32 priority) { return -1; }
@@ -701,7 +709,35 @@
 
 static inline int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
				      int level, bool line_status) { return -ENODEV; }
+static inline void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu) { }
+
+static inline int kvmppc_xive_enabled(struct kvm_vcpu *vcpu)
+	{ return 0; }
+static inline int kvmppc_xive_native_connect_vcpu(struct kvm_device *dev,
+			  struct kvm_vcpu *vcpu, u32 cpu) { return -EBUSY; }
+static inline void kvmppc_xive_native_cleanup_vcpu(struct kvm_vcpu *vcpu) { }
+static inline void kvmppc_xive_native_init_module(void) { }
+static inline void kvmppc_xive_native_exit_module(void) { }
+static inline int kvmppc_xive_native_get_vp(struct kvm_vcpu *vcpu,
+					    union kvmppc_one_reg *val)
+{ return 0; }
+static inline int kvmppc_xive_native_set_vp(struct kvm_vcpu *vcpu,
+					    union kvmppc_one_reg *val)
+{ return -ENOENT; }
+
 #endif /* CONFIG_KVM_XIVE */
+
+#if defined(CONFIG_PPC_POWERNV) && defined(CONFIG_KVM_BOOK3S_64_HANDLER)
+static inline bool xics_on_xive(void)
+{
+	return xive_enabled() && cpu_has_feature(CPU_FTR_HVMODE);
+}
+#else
+static inline bool xics_on_xive(void)
+{
+	return false;
+}
+#endif
 
 /*
  * Prototypes for functions called only from assembler code.
@@ -719,7 +755,7 @@
			  unsigned int yield_count);
 long kvmppc_h_random(struct kvm_vcpu *vcpu);
 void kvmhv_commence_exit(int trap);
-long kvmppc_realmode_machine_check(struct kvm_vcpu *vcpu);
+void kvmppc_realmode_machine_check(struct kvm_vcpu *vcpu);
 void kvmppc_subcore_enter_guest(void);
 void kvmppc_subcore_exit_guest(void);
 long kvmppc_realmode_hmi_handler(void);
@@ -737,6 +773,8 @@
		      unsigned long pte_index);
 long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags,
			unsigned long pte_index);
+long kvmppc_rm_h_page_init(struct kvm_vcpu *vcpu, unsigned long flags,
+			   unsigned long dest, unsigned long src);
 long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
			  unsigned long slb_v, unsigned int status, bool data);
 unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu);
@@ -746,6 +784,7 @@
			unsigned long mfrr);
 int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr);
 int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr);
+void kvmppc_guest_entry_inject_int(struct kvm_vcpu *vcpu);
 
 /*
  * Host-side operations we want to set up while running in real
```
|---|