2023-12-11 d2ccde1c8e90d38cee87a1b0309ad2827f3fd30d
kernel/arch/x86/kvm/mmu.h
@@ -4,6 +4,7 @@
 
 #include <linux/kvm_host.h>
 #include "kvm_cache_regs.h"
+#include "cpuid.h"
 
 #define PT64_PT_BITS 9
 #define PT64_ENT_PER_PAGE (1 << PT64_PT_BITS)
@@ -43,11 +44,6 @@
 #define PT32_ROOT_LEVEL 2
 #define PT32E_ROOT_LEVEL 3
 
-#define PT_PDPE_LEVEL 3
-#define PT_DIRECTORY_LEVEL 2
-#define PT_PAGE_TABLE_LEVEL 1
-#define PT_MAX_HUGEPAGE_LEVEL (PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES - 1)
-
 static inline u64 rsvd_bits(int s, int e)
 {
         if (e < s)
@@ -56,31 +52,23 @@
         return ((2ULL << (e - s)) - 1) << s;
 }
 
-void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask, u64 mmio_value);
+void kvm_mmu_set_mmio_spte_mask(u64 mmio_value, u64 access_mask);
 
 void
 reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context);
 
 void kvm_init_mmu(struct kvm_vcpu *vcpu, bool reset_roots);
-void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu);
+void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, u32 cr0, u32 cr4, u32 efer,
+                             gpa_t nested_cr3);
 void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
                              bool accessed_dirty, gpa_t new_eptp);
 bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu);
 int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
                           u64 fault_address, char *insn, int insn_len);
 
-static inline unsigned long kvm_mmu_available_pages(struct kvm *kvm)
-{
-        if (kvm->arch.n_max_mmu_pages > kvm->arch.n_used_mmu_pages)
-                return kvm->arch.n_max_mmu_pages -
-                        kvm->arch.n_used_mmu_pages;
-
-        return 0;
-}
-
 static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
 {
-        if (likely(vcpu->arch.mmu.root_hpa != INVALID_PAGE))
+        if (likely(vcpu->arch.mmu->root_hpa != INVALID_PAGE))
                 return 0;
 
         return kvm_mmu_load(vcpu);
@@ -100,11 +88,28 @@
         return kvm_get_pcid(vcpu, kvm_read_cr3(vcpu));
 }
 
-static inline void kvm_mmu_load_cr3(struct kvm_vcpu *vcpu)
+static inline void kvm_mmu_load_pgd(struct kvm_vcpu *vcpu)
 {
-        if (VALID_PAGE(vcpu->arch.mmu.root_hpa))
-                vcpu->arch.mmu.set_cr3(vcpu, vcpu->arch.mmu.root_hpa |
-                                             kvm_get_active_pcid(vcpu));
+        u64 root_hpa = vcpu->arch.mmu->root_hpa;
+
+        if (!VALID_PAGE(root_hpa))
+                return;
+
+        kvm_x86_ops.load_mmu_pgd(vcpu, root_hpa | kvm_get_active_pcid(vcpu),
+                                 vcpu->arch.mmu->shadow_root_level);
+}
+
+int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
+                       bool prefault);
+
+static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
+                                        u32 err, bool prefault)
+{
+#ifdef CONFIG_RETPOLINE
+        if (likely(vcpu->arch.mmu->page_fault == kvm_tdp_page_fault))
+                return kvm_tdp_page_fault(vcpu, cr2_or_gpa, err, prefault);
+#endif
+        return vcpu->arch.mmu->page_fault(vcpu, cr2_or_gpa, err, prefault);
 }
 
 /*
@@ -162,8 +167,8 @@
                                   unsigned pte_access, unsigned pte_pkey,
                                   unsigned pfec)
 {
-        int cpl = kvm_x86_ops->get_cpl(vcpu);
-        unsigned long rflags = kvm_x86_ops->get_rflags(vcpu);
+        int cpl = kvm_x86_ops.get_cpl(vcpu);
+        unsigned long rflags = kvm_x86_ops.get_rflags(vcpu);
 
         /*
          * If CPL < 3, SMAP prevention are disabled if EFLAGS.AC = 1.
@@ -208,14 +213,9 @@
         return -(u32)fault & errcode;
 }
 
-void kvm_mmu_invalidate_zap_all_pages(struct kvm *kvm);
 void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end);
 
-void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
-void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
-bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
-                                    struct kvm_memory_slot *slot, u64 gfn);
-int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu, gpa_t l2_gpa);
+int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu);
 
 int kvm_mmu_post_init_vm(struct kvm *kvm);
 void kvm_mmu_pre_destroy_vm(struct kvm *kvm);
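
Note on kvm_mmu_load_pgd() above: the value passed to kvm_x86_ops.load_mmu_pgd() is the root table's physical address OR'd with the active PCID. This works because the root is 4 KiB-aligned, so its low 12 bits are zero, which is exactly where CR3 carries the PCID when CR4.PCIDE = 1. A minimal standalone sketch of that composition (userspace C with illustrative values, not kernel code):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t root_hpa = 0x12345f000ULL; /* 4 KiB-aligned root: bits 11:0 are zero */
        uint64_t pcid     = 0x0aULL;        /* active PCID, must be < 4096 */

        assert((root_hpa & 0xfffULL) == 0); /* alignment guarantees the fields don't overlap */

        uint64_t pgd = root_hpa | pcid;     /* the combined value load_mmu_pgd() receives */
        printf("pgd = %#llx\n", (unsigned long long)pgd);
        return 0;
}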
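
Note on the CONFIG_RETPOLINE branch in kvm_mmu_do_page_fault() above: with retpolines enabled, every indirect call goes through a slow thunk, so the code compares mmu->page_fault against its overwhelmingly common target, kvm_tdp_page_fault, and calls it directly when they match. Below is a minimal userspace sketch of that devirtualization pattern; the names (handler_t, tdp_fault, shadow_fault, do_fault) are illustrative stand-ins, not kernel symbols:

#include <stdio.h>

typedef int (*handler_t)(int err);

static int tdp_fault(int err)    { return err | 0x100; } /* hot, expected target */
static int shadow_fault(int err) { return err | 0x200; } /* rare, generic target */

static int do_fault(handler_t handler, int err)
{
        /*
         * Devirtualize the common case: comparing the pointer and calling
         * the known function by name makes the hot path a cheap direct
         * call instead of a retpolined indirect call.
         */
        if (handler == tdp_fault)
                return tdp_fault(err);

        return handler(err); /* slow path: genuine indirect call */
}

int main(void)
{
        printf("%#x\n", do_fault(tdp_fault, 1));    /* direct-call fast path */
        printf("%#x\n", do_fault(shadow_fault, 1)); /* indirect-call slow path */
        return 0;
}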