@@ -4,6 +4,7 @@
 
 #include <linux/kvm_host.h>
 #include "kvm_cache_regs.h"
+#include "cpuid.h"
 
 #define PT64_PT_BITS 9
 #define PT64_ENT_PER_PAGE (1 << PT64_PT_BITS)
@@ -43,11 +44,6 @@
 #define PT32_ROOT_LEVEL 2
 #define PT32E_ROOT_LEVEL 3
 
-#define PT_PDPE_LEVEL 3
-#define PT_DIRECTORY_LEVEL 2
-#define PT_PAGE_TABLE_LEVEL 1
-#define PT_MAX_HUGEPAGE_LEVEL (PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES - 1)
-
 static inline u64 rsvd_bits(int s, int e)
 {
 	if (e < s)
@@ -56,31 +52,23 @@
 	return ((2ULL << (e - s)) - 1) << s;
 }
 
-void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask, u64 mmio_value);
+void kvm_mmu_set_mmio_spte_mask(u64 mmio_value, u64 access_mask);
 
 void
 reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context);
 
 void kvm_init_mmu(struct kvm_vcpu *vcpu, bool reset_roots);
-void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu);
+void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, u32 cr0, u32 cr4, u32 efer,
+			     gpa_t nested_cr3);
 void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
			     bool accessed_dirty, gpa_t new_eptp);
 bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu);
 int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
			  u64 fault_address, char *insn, int insn_len);
 
-static inline unsigned long kvm_mmu_available_pages(struct kvm *kvm)
-{
-	if (kvm->arch.n_max_mmu_pages > kvm->arch.n_used_mmu_pages)
-		return kvm->arch.n_max_mmu_pages -
-			kvm->arch.n_used_mmu_pages;
-
-	return 0;
-}
-
 static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
 {
-	if (likely(vcpu->arch.mmu.root_hpa != INVALID_PAGE))
+	if (likely(vcpu->arch.mmu->root_hpa != INVALID_PAGE))
 		return 0;
 
 	return kvm_mmu_load(vcpu);
@@ -100,11 +88,28 @@
 	return kvm_get_pcid(vcpu, kvm_read_cr3(vcpu));
 }
 
-static inline void kvm_mmu_load_cr3(struct kvm_vcpu *vcpu)
+static inline void kvm_mmu_load_pgd(struct kvm_vcpu *vcpu)
 {
-	if (VALID_PAGE(vcpu->arch.mmu.root_hpa))
-		vcpu->arch.mmu.set_cr3(vcpu, vcpu->arch.mmu.root_hpa |
-					     kvm_get_active_pcid(vcpu));
+	u64 root_hpa = vcpu->arch.mmu->root_hpa;
+
+	if (!VALID_PAGE(root_hpa))
+		return;
+
+	kvm_x86_ops.load_mmu_pgd(vcpu, root_hpa | kvm_get_active_pcid(vcpu),
+				 vcpu->arch.mmu->shadow_root_level);
+}
+
+int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
+		       bool prefault);
+
+static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
+					u32 err, bool prefault)
+{
+#ifdef CONFIG_RETPOLINE
+	if (likely(vcpu->arch.mmu->page_fault == kvm_tdp_page_fault))
+		return kvm_tdp_page_fault(vcpu, cr2_or_gpa, err, prefault);
+#endif
+	return vcpu->arch.mmu->page_fault(vcpu, cr2_or_gpa, err, prefault);
 }
 
 /*
@@ -162,8 +167,8 @@
 				  unsigned pte_access, unsigned pte_pkey,
 				  unsigned pfec)
 {
-	int cpl = kvm_x86_ops->get_cpl(vcpu);
-	unsigned long rflags = kvm_x86_ops->get_rflags(vcpu);
+	int cpl = kvm_x86_ops.get_cpl(vcpu);
+	unsigned long rflags = kvm_x86_ops.get_rflags(vcpu);
 
 	/*
 	 * If CPL < 3, SMAP prevention are disabled if EFLAGS.AC = 1.
@@ -208,14 +213,9 @@
 	return -(u32)fault & errcode;
 }
 
-void kvm_mmu_invalidate_zap_all_pages(struct kvm *kvm);
 void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end);
 
-void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
-void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
-bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
-				    struct kvm_memory_slot *slot, u64 gfn);
-int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu, gpa_t l2_gpa);
+int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu);
 
 int kvm_mmu_post_init_vm(struct kvm *kvm);
 void kvm_mmu_pre_destroy_vm(struct kvm *kvm);