@@ -1,10 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 #ifndef __KVM_HOST_H
 #define __KVM_HOST_H
 
-/*
- * This work is licensed under the terms of the GNU GPL, version 2. See
- * the COPYING file in the top-level directory.
- */
 
 #include <linux/types.h>
 #include <linux/hardirq.h>
@@ -26,7 +23,7 @@
 #include <linux/irqflags.h>
 #include <linux/context_tracking.h>
 #include <linux/irqbypass.h>
-#include <linux/swait.h>
+#include <linux/rcuwait.h>
 #include <linux/refcount.h>
 #include <linux/nospec.h>
 #include <asm/signal.h>
@@ -48,6 +45,27 @@
  * include/linux/kvm_h.
  */
 #define KVM_MEMSLOT_INVALID	(1UL << 16)
+
+/*
+ * Bit 63 of the memslot generation number is an "update in-progress flag",
+ * e.g. is temporarily set for the duration of install_new_memslots().
+ * This flag effectively creates a unique generation number that is used to
+ * mark cached memslot data, e.g. MMIO accesses, as potentially being stale,
+ * i.e. may (or may not) have come from the previous memslots generation.
+ *
+ * This is necessary because the actual memslots update is not atomic with
+ * respect to the generation number update.  Updating the generation number
+ * first would allow a vCPU to cache a spte from the old memslots using the
+ * new generation number, and updating the generation number after switching
+ * to the new memslots would allow cache hits using the old generation number
+ * to reference the defunct memslots.
+ *
+ * This mechanism is used to prevent getting hits in KVM's caches while a
+ * memslot update is in-progress, and to prevent cache hits *after* updating
+ * the actual generation number against accesses that were inserted into the
+ * cache *before* the memslots were updated.
+ */
+#define KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS	BIT_ULL(63)
 
 /* Two fragments for cross MMIO pages. */
 #define KVM_MAX_MMIO_FRAGMENTS	2
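The new in-progress bit is easiest to see from a consumer's side. A minimal sketch (illustration only, not part of this patch; `mmio_gen_matches()` and its caller are hypothetical) of how a cached MMIO generation check could refuse to match while an update is in flight:

```c
/*
 * Illustration only (not part of this patch): a generation check can treat
 * any value with the update-in-progress bit set as never matching, so cache
 * entries created while install_new_memslots() runs are always re-validated.
 */
static inline bool mmio_gen_matches(u64 cached_gen, u64 slots_gen)
{
	if (slots_gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS)
		return false;	/* update in flight: force the slow path */
	return cached_gen == slots_gen;
}
```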
@@ -128,18 +146,17 @@
 #define KVM_REQ_MMU_RELOAD        (1 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
 #define KVM_REQ_PENDING_TIMER     2
 #define KVM_REQ_UNHALT            3
+#define KVM_REQ_VM_BUGGED         (4 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
 #define KVM_REQUEST_ARCH_BASE     8
 
 #define KVM_ARCH_REQ_FLAGS(nr, flags) ({ \
-	BUILD_BUG_ON((unsigned)(nr) >= (FIELD_SIZEOF(struct kvm_vcpu, requests) * 8) - KVM_REQUEST_ARCH_BASE); \
+	BUILD_BUG_ON((unsigned)(nr) >= (sizeof_field(struct kvm_vcpu, requests) * 8) - KVM_REQUEST_ARCH_BASE); \
 	(unsigned)(((nr) + KVM_REQUEST_ARCH_BASE) | (flags)); \
 })
 #define KVM_ARCH_REQ(nr)           KVM_ARCH_REQ_FLAGS(nr, 0)
 
 #define KVM_USERSPACE_IRQ_SOURCE_ID		0
 #define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID	1
-
-extern struct kmem_cache *kvm_vcpu_cache;
 
 extern struct mutex kvm_lock;
 extern struct list_head vm_list;
@@ -174,8 +191,8 @@
 				  int len, void *val);
 int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
 			    int len, struct kvm_io_device *dev);
-void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
-			       struct kvm_io_device *dev);
+int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
+			      struct kvm_io_device *dev);
 struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
 					 gpa_t addr);
 
@@ -190,12 +207,13 @@
 	unsigned long addr;
 	struct kvm_arch_async_pf arch;
 	bool wakeup_all;
+	bool notpresent_injected;
 };
 
 void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu);
 void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu);
-int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
-		       unsigned long hva, struct kvm_arch_async_pf *arch);
+bool kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
+			unsigned long hva, struct kvm_arch_async_pf *arch);
 int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
 #endif
 
@@ -261,8 +279,7 @@
 	struct mutex mutex;
 	struct kvm_run *run;
 
-	int guest_xcr0_loaded;
-	struct swait_queue_head wq;
+	struct rcuwait wait;
 	struct pid __rcu *pid;
 	int sigset_active;
 	sigset_t sigset;
@@ -301,8 +318,8 @@
 	} spin_loop;
 #endif
 	bool preempted;
+	bool ready;
 	struct kvm_vcpu_arch arch;
-	struct dentry *debugfs_dentry;
 };
 
 static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
@@ -330,6 +347,7 @@
 	unsigned long userspace_addr;
 	u32 flags;
 	short id;
+	u16 as_id;
 };
 
 static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
@@ -343,6 +361,10 @@
 
 	return memslot->dirty_bitmap + len / sizeof(*memslot->dirty_bitmap);
 }
+
+#ifndef KVM_DIRTY_LOG_MANUAL_CAPS
+#define KVM_DIRTY_LOG_MANUAL_CAPS KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE
+#endif
 
 struct kvm_s390_adapter_int {
 	u64 ind_addr;
@@ -389,7 +411,7 @@
 	 * Array indexed by gsi. Each entry contains list of irq chips
 	 * the gsi is connected to.
 	 */
-	struct hlist_head map[0];
+	struct hlist_head map[];
 };
 #endif
 
@@ -415,11 +437,11 @@
  */
 struct kvm_memslots {
 	u64 generation;
-	struct kvm_memory_slot memslots[KVM_MEM_SLOTS_NUM];
 	/* The mapping table from slot id to the index in memslots[]. */
 	short id_to_index[KVM_MEM_SLOTS_NUM];
 	atomic_t lru_slot;
 	int used_slots;
+	struct kvm_memory_slot memslots[];
 };
 
 struct kvm {
@@ -477,11 +499,14 @@
 #endif
 	long tlbs_dirty;
 	struct list_head devices;
+	u64 manual_dirty_log_protect;
 	struct dentry *debugfs_dentry;
 	struct kvm_stat_data **debugfs_stat_data;
 	struct srcu_struct srcu;
 	struct srcu_struct irq_srcu;
 	pid_t userspace_pid;
+	unsigned int max_halt_poll_ns;
+	bool vm_bugged;
 };
 
 #define kvm_err(fmt, ...) \
@@ -509,6 +534,36 @@
 			## __VA_ARGS__)
 #define vcpu_err(vcpu, fmt, ...)					\
 	kvm_err("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)
+
+bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req);
+static inline void kvm_vm_bugged(struct kvm *kvm)
+{
+	kvm->vm_bugged = true;
+	kvm_make_all_cpus_request(kvm, KVM_REQ_VM_BUGGED);
+}
+
+#define KVM_BUG(cond, kvm, fmt...)				\
+({								\
+	int __ret = (cond);					\
+								\
+	if (WARN_ONCE(__ret && !(kvm)->vm_bugged, fmt))		\
+		kvm_vm_bugged(kvm);				\
+	unlikely(__ret);					\
+})
+
+#define KVM_BUG_ON(cond, kvm)					\
+({								\
+	int __ret = (cond);					\
+								\
+	if (WARN_ON_ONCE(__ret && !(kvm)->vm_bugged))		\
+		kvm_vm_bugged(kvm);				\
+	unlikely(__ret);					\
+})
+
+static inline bool kvm_dirty_log_manual_protect_and_init_set(struct kvm *kvm)
+{
+	return !!(kvm->manual_dirty_log_protect & KVM_DIRTY_LOG_INITIALLY_SET);
+}
 
 static inline struct kvm_io_bus *kvm_get_bus(struct kvm *kvm, enum kvm_bus idx)
 {
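KVM_BUG() and KVM_BUG_ON() evaluate to the (unlikely-annotated) condition, so a caller can flag the VM as buggy, kick all vCPUs with KVM_REQ_VM_BUGGED, and bail out in one step. A usage sketch (the caller and the NR_EXIT_REASONS bound are invented for illustration, not part of this patch):

```c
/*
 * Hypothetical caller: KVM_BUG_ON() warns once per call site, marks the VM
 * as buggy, requests KVM_REQ_VM_BUGGED on all vCPUs, and returns the
 * condition so the caller can stop processing.
 */
static int example_handle_exit(struct kvm_vcpu *vcpu, u32 exit_reason)
{
	if (KVM_BUG_ON(exit_reason >= NR_EXIT_REASONS, vcpu->kvm))
		return -EIO;	/* the VM is now bugged; stop running it */
	return 0;
}
```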
@@ -555,13 +610,13 @@
 	return vcpu->vcpu_idx;
 }
 
-#define kvm_for_each_memslot(memslot, slots)	\
-	for (memslot = &slots->memslots[0];	\
-	      memslot < slots->memslots + KVM_MEM_SLOTS_NUM && memslot->npages;\
-		memslot++)
+#define kvm_for_each_memslot(memslot, slots)				\
+	for (memslot = &slots->memslots[0];				\
+	     memslot < slots->memslots + slots->used_slots; memslot++)	\
+		if (WARN_ON_ONCE(!memslot->npages)) {			\
+		} else
 
-int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
-void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
+void kvm_vcpu_destroy(struct kvm_vcpu *vcpu);
 
 void vcpu_load(struct kvm_vcpu *vcpu);
 void vcpu_put(struct kvm_vcpu *vcpu);
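With this rewrite, kvm_for_each_memslot() walks only the first used_slots entries and warns if it ever sees an empty slot, instead of scanning all KVM_MEM_SLOTS_NUM entries up to the first hole. An illustrative caller (example only, not from the patch):

```c
/* Example only: count guest pages across all populated slots. */
static unsigned long example_count_guest_pages(struct kvm_memslots *slots)
{
	struct kvm_memory_slot *memslot;
	unsigned long pages = 0;

	kvm_for_each_memslot(memslot, slots)
		pages += memslot->npages;

	return pages;
}
```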
@@ -597,6 +652,7 @@
 
 void kvm_get_kvm(struct kvm *kvm);
 void kvm_put_kvm(struct kvm *kvm);
+void kvm_put_kvm_no_destroy(struct kvm *kvm);
 
 static inline struct kvm_memslots *__kvm_memslots(struct kvm *kvm, int as_id)
 {
@@ -618,11 +674,14 @@
 	return __kvm_memslots(vcpu->kvm, as_id);
 }
 
-static inline struct kvm_memory_slot *
-id_to_memslot(struct kvm_memslots *slots, int id)
+static inline
+struct kvm_memory_slot *id_to_memslot(struct kvm_memslots *slots, int id)
 {
 	int index = slots->id_to_index[id];
 	struct kvm_memory_slot *slot;
+
+	if (index < 0)
+		return NULL;
 
 	slot = &slots->memslots[index];
 
@@ -652,10 +711,7 @@
 			  const struct kvm_userspace_memory_region *mem);
 int __kvm_set_memory_region(struct kvm *kvm,
 			    const struct kvm_userspace_memory_region *mem);
-void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
-			   struct kvm_memory_slot *dont);
-int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
-			    unsigned long npages);
+void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot);
 void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen);
 int kvm_arch_prepare_memory_region(struct kvm *kvm,
 				struct kvm_memory_slot *memslot,
@@ -663,11 +719,9 @@
 				enum kvm_mr_change change);
 void kvm_arch_commit_memory_region(struct kvm *kvm,
 				const struct kvm_userspace_memory_region *mem,
-				const struct kvm_memory_slot *old,
+				struct kvm_memory_slot *old,
 				const struct kvm_memory_slot *new,
 				enum kvm_mr_change change);
-bool kvm_largepages_enabled(void);
-void kvm_disable_largepages(void);
 /* flush all memory translations */
 void kvm_arch_flush_shadow_all(struct kvm *kvm);
 /* flush memory translations pointing to 'slot' */
@@ -687,7 +741,6 @@
 void kvm_release_page_dirty(struct page *page);
 void kvm_set_page_accessed(struct page *page);
 
-kvm_pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn);
 kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
 kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
 		      bool *writable);
@@ -706,11 +759,12 @@
 void kvm_release_pfn(kvm_pfn_t pfn, bool dirty, struct gfn_to_pfn_cache *cache);
 int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
 			int len);
-int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
-			  unsigned long len);
 int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
 int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
 			   void *data, unsigned long len);
+int kvm_read_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
+				 void *data, unsigned int offset,
+				 unsigned long len);
 int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
 			 int offset, int len);
 int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
@@ -722,11 +776,56 @@
 			   unsigned long len);
 int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
 			      gpa_t gpa, unsigned long len);
+
+#define __kvm_get_guest(kvm, gfn, offset, v)				\
+({									\
+	unsigned long __addr = gfn_to_hva(kvm, gfn);			\
+	typeof(v) __user *__uaddr = (typeof(__uaddr))(__addr + offset);	\
+	int __ret = -EFAULT;						\
+									\
+	if (!kvm_is_error_hva(__addr))					\
+		__ret = get_user(v, __uaddr);				\
+	__ret;								\
+})
+
+#define kvm_get_guest(kvm, gpa, v)					\
+({									\
+	gpa_t __gpa = gpa;						\
+	struct kvm *__kvm = kvm;					\
+									\
+	__kvm_get_guest(__kvm, __gpa >> PAGE_SHIFT,			\
+			offset_in_page(__gpa), v);			\
+})
+
+#define __kvm_put_guest(kvm, gfn, offset, v)				\
+({									\
+	unsigned long __addr = gfn_to_hva(kvm, gfn);			\
+	typeof(v) __user *__uaddr = (typeof(__uaddr))(__addr + offset);	\
+	int __ret = -EFAULT;						\
+									\
+	if (!kvm_is_error_hva(__addr))					\
+		__ret = put_user(v, __uaddr);				\
+	if (!__ret)							\
+		mark_page_dirty(kvm, gfn);				\
+	__ret;								\
+})
+
+#define kvm_put_guest(kvm, gpa, v)					\
+({									\
+	gpa_t __gpa = gpa;						\
+	struct kvm *__kvm = kvm;					\
+									\
+	__kvm_put_guest(__kvm, __gpa >> PAGE_SHIFT,			\
+			offset_in_page(__gpa), v);			\
+})
+
 int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
 int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
 bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
+bool kvm_vcpu_is_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);
 unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn);
+void mark_page_dirty_in_slot(struct kvm_memory_slot *memslot, gfn_t gfn);
 void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
 
 struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu);
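kvm_get_guest() and kvm_put_guest() wrap get_user()/put_user() on the host virtual address that backs a guest physical address, returning 0 or -EFAULT; kvm_put_guest() additionally marks the page dirty on success. A usage sketch (hypothetical caller, assuming `gpa` points at a 32-bit counter kept by the guest; not part of this patch):

```c
/* Hypothetical caller: read-modify-write a guest-resident 32-bit counter. */
static int example_bump_guest_counter(struct kvm *kvm, gpa_t gpa)
{
	u32 val;
	int ret;

	ret = kvm_get_guest(kvm, gpa, val);
	if (ret)
		return ret;

	/* Marks the page dirty on success. */
	return kvm_put_guest(kvm, gpa, val + 1);
}
```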
@@ -768,9 +867,20 @@
 void kvm_flush_remote_tlbs(struct kvm *kvm);
 void kvm_reload_remote_mmus(struct kvm *kvm);
 
+#ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
+int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min);
+int kvm_mmu_memory_cache_nr_free_objects(struct kvm_mmu_memory_cache *mc);
+void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc);
+void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc);
+#endif
+
 bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
+				 struct kvm_vcpu *except,
 				 unsigned long *vcpu_bitmap, cpumask_var_t tmp);
-bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req);
+bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req,
+				      struct kvm_vcpu *except);
+bool kvm_make_cpus_request_mask(struct kvm *kvm, unsigned int req,
+				unsigned long *vcpu_bitmap);
 
 long kvm_arch_dev_ioctl(struct file *filp,
 			unsigned int ioctl, unsigned long arg);
@@ -780,24 +890,29 @@
 
 int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext);
 
-int kvm_get_dirty_log(struct kvm *kvm,
-			struct kvm_dirty_log *log, int *is_dirty);
-
-int kvm_get_dirty_log_protect(struct kvm *kvm,
-			struct kvm_dirty_log *log, bool *is_dirty);
-
 void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
 					struct kvm_memory_slot *slot,
 					gfn_t gfn_offset,
 					unsigned long mask);
+void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot);
 
-int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
-				struct kvm_dirty_log *log);
+#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
+void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
+					struct kvm_memory_slot *memslot);
+#else /* !CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */
+int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log);
+int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log,
+		      int *is_dirty, struct kvm_memory_slot **memslot);
+#endif
 
 int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
 			bool line_status);
+int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
+			    struct kvm_enable_cap *cap);
 long kvm_arch_vm_ioctl(struct file *filp,
 		       unsigned int ioctl, unsigned long arg);
+long kvm_arch_vm_compat_ioctl(struct file *filp, unsigned int ioctl,
+			      unsigned long arg);
 
 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
@@ -817,36 +932,35 @@
 				    struct kvm_mp_state *mp_state);
 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
 					struct kvm_guest_debug *dbg);
-int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
+int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu);
 
 int kvm_arch_init(void *opaque);
 void kvm_arch_exit(void);
 
-int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
-void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu);
-
 void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu);
 
-void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu);
 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
-struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id);
-int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu);
+int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id);
+int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu);
 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu);
 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);
 
-bool kvm_arch_has_vcpu_debugfs(void);
-int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu);
+#ifdef __KVM_HAVE_ARCH_VCPU_DEBUGFS
+void kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu, struct dentry *debugfs_dentry);
+#endif
 
 int kvm_arch_hardware_enable(void);
 void kvm_arch_hardware_disable(void);
-int kvm_arch_hardware_setup(void);
+int kvm_arch_hardware_setup(void *opaque);
 void kvm_arch_hardware_unsetup(void);
-void kvm_arch_check_processor_compat(void *rtn);
+int kvm_arch_check_processor_compat(void *opaque);
 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
 bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu);
 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);
 bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu);
+int kvm_arch_post_init_vm(struct kvm *kvm);
+void kvm_arch_pre_destroy_vm(struct kvm *kvm);
 
 #ifndef __KVM_HAVE_ARCH_VM_ALLOC
 /*
@@ -902,18 +1016,18 @@
 {
 }
 
-static inline bool kvm_arch_has_assigned_device(struct kvm *kvm)
+static __always_inline bool kvm_arch_has_assigned_device(struct kvm *kvm)
 {
 	return false;
 }
 #endif
 
-static inline struct swait_queue_head *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu)
+static inline struct rcuwait *kvm_arch_vcpu_get_wait(struct kvm_vcpu *vcpu)
 {
 #ifdef __KVM_HAVE_ARCH_WQP
-	return vcpu->arch.wqp;
+	return vcpu->arch.waitp;
 #else
-	return &vcpu->wq;
+	return &vcpu->wait;
 #endif
 }
 
@@ -936,10 +1050,10 @@
 void kvm_arch_sync_events(struct kvm *kvm);
 
 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
-void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
 
 bool kvm_is_reserved_pfn(kvm_pfn_t pfn);
 bool kvm_is_zone_device_pfn(kvm_pfn_t pfn);
+bool kvm_is_transparent_hugepage(kvm_pfn_t pfn);
 
 struct kvm_irq_ack_notifier {
 	struct hlist_node link;
@@ -967,12 +1081,15 @@
 				   struct kvm_irq_ack_notifier *kian);
 int kvm_request_irq_source_id(struct kvm *kvm);
 void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);
+bool kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args);
 
 /*
  * search_memslots() and __gfn_to_memslot() are here because they are
  * used in non-modular code in arch/powerpc/kvm/book3s_hv_rm_mmu.c.
  * gfn_to_memslot() itself isn't here as an inline because that would
  * bloat other code too much.
+ *
+ * IMPORTANT: Slots are sorted from highest GFN to lowest GFN!
 */
 static inline struct kvm_memory_slot *
 search_memslots(struct kvm_memslots *slots, gfn_t gfn)
@@ -980,6 +1097,9 @@
 	int start = 0, end = slots->used_slots;
 	int slot = atomic_read(&slots->lru_slot);
 	struct kvm_memory_slot *memslots = slots->memslots;
+
+	if (unlikely(!slots->used_slots))
+		return NULL;
 
 	if (gfn >= memslots[slot].base_gfn &&
 	    gfn < memslots[slot].base_gfn + memslots[slot].npages)
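Together with the new NULL return for an empty memslot array, the "sorted from highest GFN to lowest" note is what lets the rest of search_memslots() (not shown in this hunk) binary-search the slots. A standalone sketch of that idea, over an array sorted by descending base_gfn (illustration only, not the function body from the patch):

```c
/*
 * Sketch only: locate the slot containing 'gfn' in an array sorted by
 * descending base_gfn, i.e. find the lowest index whose base_gfn <= gfn,
 * then check that gfn falls inside that slot.
 */
static struct kvm_memory_slot *
example_find_slot(struct kvm_memory_slot *memslots, int used_slots, gfn_t gfn)
{
	int start = 0, end = used_slots;

	while (start < end) {
		int mid = start + (end - start) / 2;

		if (gfn >= memslots[mid].base_gfn)
			end = mid;	/* answer is this slot or an earlier (higher-GFN) one */
		else
			start = mid + 1;
	}

	if (start < used_slots &&
	    gfn >= memslots[start].base_gfn &&
	    gfn < memslots[start].base_gfn + memslots[start].npages)
		return &memslots[start];

	return NULL;
}
```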
@@ -1070,9 +1190,8 @@
 };
 
 struct kvm_stat_data {
-	int offset;
-	int mode;
 	struct kvm *kvm;
+	struct kvm_stats_debugfs_item *dbgfs_item;
 };
 
 struct kvm_stats_debugfs_item {
@@ -1081,6 +1200,15 @@
 	enum kvm_stat_kind kind;
 	int mode;
 };
+
+#define KVM_DBGFS_GET_MODE(dbgfs_item)					\
+	((dbgfs_item)->mode ? (dbgfs_item)->mode : 0644)
+
+#define VM_STAT(n, x, ...)						\
+	{ n, offsetof(struct kvm, stat.x), KVM_STAT_VM, ## __VA_ARGS__ }
+#define VCPU_STAT(n, x, ...)						\
+	{ n, offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU, ## __VA_ARGS__ }
+
 extern struct kvm_stats_debugfs_item debugfs_entries[];
 extern struct dentry *kvm_debugfs_dir;
 
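VM_STAT() and VCPU_STAT() are initializer helpers for debugfs_entries[], recording a stat's name, the offset of its counter inside struct kvm or struct kvm_vcpu, and its kind; KVM_DBGFS_GET_MODE() falls back to 0644 when an entry leaves .mode at 0. An illustrative table fragment (the stat fields shown are examples, not taken from this patch):

```c
/* Illustrative fragment of an arch's stats table (example field names). */
struct kvm_stats_debugfs_item debugfs_entries[] = {
	VM_STAT("remote_tlb_flush", remote_tlb_flush),
	VCPU_STAT("halt_exits", halt_exits),
	VCPU_STAT("halt_wakeup", halt_wakeup),
	{ NULL }
};
```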
@@ -1217,10 +1345,11 @@
 
 extern unsigned int halt_poll_ns;
 extern unsigned int halt_poll_ns_grow;
+extern unsigned int halt_poll_ns_grow_start;
 extern unsigned int halt_poll_ns_shrink;
 
 struct kvm_device {
-	struct kvm_device_ops *ops;
+	const struct kvm_device_ops *ops;
 	struct kvm *kvm;
 	void *private;
 	struct list_head vm_node;
@@ -1253,17 +1382,27 @@
 	 */
 	void (*destroy)(struct kvm_device *dev);
 
+	/*
+	 * Release is an alternative method to free the device. It is
+	 * called when the device file descriptor is closed. Once
+	 * release is called, the destroy method will not be called
+	 * anymore as the device is removed from the device list of
+	 * the VM. kvm->lock is held.
+	 */
+	void (*release)(struct kvm_device *dev);
+
 	int (*set_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
 	int (*get_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
 	int (*has_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
 	long (*ioctl)(struct kvm_device *dev, unsigned int ioctl,
 		      unsigned long arg);
+	int (*mmap)(struct kvm_device *dev, struct vm_area_struct *vma);
 };
 
 void kvm_device_get(struct kvm_device *dev);
 void kvm_device_put(struct kvm_device *dev);
 struct kvm_device *kvm_device_from_filp(struct file *filp);
-int kvm_register_device_ops(struct kvm_device_ops *ops, u32 type);
+int kvm_register_device_ops(const struct kvm_device_ops *ops, u32 type);
 void kvm_unregister_device_ops(u32 type);
 
 extern struct kvm_device_ops kvm_mpic_ops;
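A device whose state must stay reachable until its file descriptor goes away can implement .release instead of .destroy. A sketch of such an ops table (the names and teardown logic are illustrative, not from this patch):

```c
/* Hypothetical device that frees its state only when the fd is closed. */
static void example_dev_release(struct kvm_device *dev)
{
	/* Called on fd close; the device is already off kvm->devices, kvm->lock held. */
	kfree(dev->private);
	kfree(dev);
}

static struct kvm_device_ops example_dev_ops = {
	.name = "example-dev",
	.release = example_dev_release,
};
```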
@@ -1292,6 +1431,15 @@
 }
 #endif /* CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */
 
+static inline bool kvm_is_visible_memslot(struct kvm_memory_slot *memslot)
+{
+	return (memslot && memslot->id < KVM_USER_MEM_SLOTS &&
+		!(memslot->flags & KVM_MEMSLOT_INVALID));
+}
+
+struct kvm_vcpu *kvm_get_running_vcpu(void);
+struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void);
+
 #ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
 bool kvm_arch_has_irq_bypass(void);
 int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *,
@@ -1318,6 +1466,16 @@
 }
 #endif /* CONFIG_HAVE_KVM_INVALID_WAKEUPS */
 
+#ifdef CONFIG_HAVE_KVM_NO_POLL
+/* Callback that tells if we must not poll */
+bool kvm_arch_no_poll(struct kvm_vcpu *vcpu);
+#else
+static inline bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
+{
+	return false;
+}
+#endif /* CONFIG_HAVE_KVM_NO_POLL */
+
 #ifdef CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL
 long kvm_arch_vcpu_async_ioctl(struct file *filp,
 			       unsigned int ioctl, unsigned long arg);
@@ -1333,6 +1491,8 @@
 void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
 					    unsigned long start, unsigned long end);
 
+void kvm_arch_guest_memory_reclaimed(struct kvm *kvm);
+
 #ifdef CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE
 int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu);
 #else
@@ -1348,4 +1508,12 @@
 				uintptr_t data, const char *name,
 				struct task_struct **thread_ptr);
 
+#ifdef CONFIG_KVM_XFER_TO_GUEST_WORK
+static inline void kvm_handle_signal_exit(struct kvm_vcpu *vcpu)
+{
+	vcpu->run->exit_reason = KVM_EXIT_INTR;
+	vcpu->stat.signal_exits++;
+}
+#endif /* CONFIG_KVM_XFER_TO_GUEST_WORK */
+
 #endif
---|