2023-12-11 d2ccde1c8e90d38cee87a1b0309ad2827f3fd30d
kernel/arch/x86/kvm/kvm_cache_regs.h
@@ -2,27 +2,85 @@
 #ifndef ASM_KVM_CACHE_REGS_H
 #define ASM_KVM_CACHE_REGS_H
 
+#include <linux/kvm_host.h>
+
 #define KVM_POSSIBLE_CR0_GUEST_BITS X86_CR0_TS
 #define KVM_POSSIBLE_CR4_GUEST_BITS				  \
 	(X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR \
-	 | X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_PGE | X86_CR4_TSD)
+	 | X86_CR4_OSXMMEXCPT | X86_CR4_PGE | X86_CR4_TSD | X86_CR4_FSGSBASE)
 
-static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu,
-					      enum kvm_reg reg)
+#define BUILD_KVM_GPR_ACCESSORS(lname, uname)				      \
+static __always_inline unsigned long kvm_##lname##_read(struct kvm_vcpu *vcpu)\
+{									      \
+	return vcpu->arch.regs[VCPU_REGS_##uname];			      \
+}									      \
+static __always_inline void kvm_##lname##_write(struct kvm_vcpu *vcpu,	      \
+						unsigned long val)	      \
+{									      \
+	vcpu->arch.regs[VCPU_REGS_##uname] = val;			      \
+}
+BUILD_KVM_GPR_ACCESSORS(rax, RAX)
+BUILD_KVM_GPR_ACCESSORS(rbx, RBX)
+BUILD_KVM_GPR_ACCESSORS(rcx, RCX)
+BUILD_KVM_GPR_ACCESSORS(rdx, RDX)
+BUILD_KVM_GPR_ACCESSORS(rbp, RBP)
+BUILD_KVM_GPR_ACCESSORS(rsi, RSI)
+BUILD_KVM_GPR_ACCESSORS(rdi, RDI)
+#ifdef CONFIG_X86_64
+BUILD_KVM_GPR_ACCESSORS(r8, R8)
+BUILD_KVM_GPR_ACCESSORS(r9, R9)
+BUILD_KVM_GPR_ACCESSORS(r10, R10)
+BUILD_KVM_GPR_ACCESSORS(r11, R11)
+BUILD_KVM_GPR_ACCESSORS(r12, R12)
+BUILD_KVM_GPR_ACCESSORS(r13, R13)
+BUILD_KVM_GPR_ACCESSORS(r14, R14)
+BUILD_KVM_GPR_ACCESSORS(r15, R15)
+#endif
+
+static inline bool kvm_register_is_available(struct kvm_vcpu *vcpu,
+					     enum kvm_reg reg)
 {
-	if (!test_bit(reg, (unsigned long *)&vcpu->arch.regs_avail))
-		kvm_x86_ops->cache_reg(vcpu, reg);
+	return test_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
+}
+
+static inline bool kvm_register_is_dirty(struct kvm_vcpu *vcpu,
+					 enum kvm_reg reg)
+{
+	return test_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
+}
+
+static inline void kvm_register_mark_available(struct kvm_vcpu *vcpu,
+					       enum kvm_reg reg)
+{
+	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
+}
+
+static inline void kvm_register_mark_dirty(struct kvm_vcpu *vcpu,
+					   enum kvm_reg reg)
+{
+	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
+	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
+}
+
+static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu, int reg)
+{
+	if (WARN_ON_ONCE((unsigned int)reg >= NR_VCPU_REGS))
+		return 0;
+
+	if (!kvm_register_is_available(vcpu, reg))
+		kvm_x86_ops.cache_reg(vcpu, reg);
 
 	return vcpu->arch.regs[reg];
 }
 
-static inline void kvm_register_write(struct kvm_vcpu *vcpu,
-				      enum kvm_reg reg,
+static inline void kvm_register_write(struct kvm_vcpu *vcpu, int reg,
 				      unsigned long val)
 {
+	if (WARN_ON_ONCE((unsigned int)reg >= NR_VCPU_REGS))
+		return;
+
 	vcpu->arch.regs[reg] = val;
-	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
-	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
+	kvm_register_mark_dirty(vcpu, reg);
 }
 
 static inline unsigned long kvm_rip_read(struct kvm_vcpu *vcpu)
@@ -35,13 +93,22 @@
 	kvm_register_write(vcpu, VCPU_REGS_RIP, val);
 }
 
+static inline unsigned long kvm_rsp_read(struct kvm_vcpu *vcpu)
+{
+	return kvm_register_read(vcpu, VCPU_REGS_RSP);
+}
+
+static inline void kvm_rsp_write(struct kvm_vcpu *vcpu, unsigned long val)
+{
+	kvm_register_write(vcpu, VCPU_REGS_RSP, val);
+}
+
 static inline u64 kvm_pdptr_read(struct kvm_vcpu *vcpu, int index)
 {
 	might_sleep();  /* on svm */
 
-	if (!test_bit(VCPU_EXREG_PDPTR,
-		      (unsigned long *)&vcpu->arch.regs_avail))
-		kvm_x86_ops->cache_reg(vcpu, (enum kvm_reg)VCPU_EXREG_PDPTR);
+	if (!kvm_register_is_available(vcpu, VCPU_EXREG_PDPTR))
+		kvm_x86_ops.cache_reg(vcpu, VCPU_EXREG_PDPTR);
 
 	return vcpu->arch.walk_mmu->pdptrs[index];
 }
@@ -49,8 +116,9 @@
 static inline ulong kvm_read_cr0_bits(struct kvm_vcpu *vcpu, ulong mask)
 {
 	ulong tmask = mask & KVM_POSSIBLE_CR0_GUEST_BITS;
-	if (tmask & vcpu->arch.cr0_guest_owned_bits)
-		kvm_x86_ops->decache_cr0_guest_bits(vcpu);
+	if ((tmask & vcpu->arch.cr0_guest_owned_bits) &&
+	    !kvm_register_is_available(vcpu, VCPU_EXREG_CR0))
+		kvm_x86_ops.cache_reg(vcpu, VCPU_EXREG_CR0);
 	return vcpu->arch.cr0 & mask;
 }
 
@@ -62,15 +130,16 @@
 static inline ulong kvm_read_cr4_bits(struct kvm_vcpu *vcpu, ulong mask)
 {
 	ulong tmask = mask & KVM_POSSIBLE_CR4_GUEST_BITS;
-	if (tmask & vcpu->arch.cr4_guest_owned_bits)
-		kvm_x86_ops->decache_cr4_guest_bits(vcpu);
+	if ((tmask & vcpu->arch.cr4_guest_owned_bits) &&
+	    !kvm_register_is_available(vcpu, VCPU_EXREG_CR4))
+		kvm_x86_ops.cache_reg(vcpu, VCPU_EXREG_CR4);
 	return vcpu->arch.cr4 & mask;
 }
 
 static inline ulong kvm_read_cr3(struct kvm_vcpu *vcpu)
 {
-	if (!test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail))
-		kvm_x86_ops->decache_cr3(vcpu);
+	if (!kvm_register_is_available(vcpu, VCPU_EXREG_CR3))
+		kvm_x86_ops.cache_reg(vcpu, VCPU_EXREG_CR3);
 	return vcpu->arch.cr3;
 }
 
@@ -81,8 +150,8 @@
 
 static inline u64 kvm_read_edx_eax(struct kvm_vcpu *vcpu)
 {
-	return (kvm_register_read(vcpu, VCPU_REGS_RAX) & -1u)
-	       | ((u64)(kvm_register_read(vcpu, VCPU_REGS_RDX) & -1u) << 32);
+	return (kvm_rax_read(vcpu) & -1u)
+	       | ((u64)(kvm_rdx_read(vcpu) & -1u) << 32);
 }
 
 static inline void enter_guest_mode(struct kvm_vcpu *vcpu)
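
The BUILD_KVM_GPR_ACCESSORS macro added above stamps out a kvm_<reg>_read()/kvm_<reg>_write() pair per general-purpose register via token pasting, so callers such as kvm_read_edx_eax() stop indexing vcpu->arch.regs by hand. Below is a minimal user-space sketch of that same macro pattern; the demo_vcpu struct, DEMO_REG_* enum, and demo_* names are hypothetical stand-ins, not the kernel API.

#include <stdio.h>

/* Hypothetical stand-in for vcpu->arch.regs[]; not the kernel's layout. */
enum demo_reg { DEMO_REG_RAX, DEMO_REG_RDX, NR_DEMO_REGS };

struct demo_vcpu {
	unsigned long regs[NR_DEMO_REGS];
};

/*
 * Same token-pasting trick as BUILD_KVM_GPR_ACCESSORS: one expansion
 * emits a read helper and a write helper for the named register.
 */
#define BUILD_DEMO_GPR_ACCESSORS(lname, uname)				\
static inline unsigned long demo_##lname##_read(struct demo_vcpu *v)	\
{									\
	return v->regs[DEMO_REG_##uname];				\
}									\
static inline void demo_##lname##_write(struct demo_vcpu *v,		\
					unsigned long val)		\
{									\
	v->regs[DEMO_REG_##uname] = val;				\
}

BUILD_DEMO_GPR_ACCESSORS(rax, RAX)
BUILD_DEMO_GPR_ACCESSORS(rdx, RDX)

int main(void)
{
	struct demo_vcpu vcpu = { 0 };

	/* Mirrors kvm_read_edx_eax(): combine the two 32-bit halves. */
	demo_rax_write(&vcpu, 0x12345678u);
	demo_rdx_write(&vcpu, 0x9abcdef0u);
	unsigned long long edx_eax =
		(demo_rax_read(&vcpu) & -1u) |
		((unsigned long long)(demo_rdx_read(&vcpu) & -1u) << 32);
	printf("edx:eax = %#llx\n", edx_eax);
	return 0;
}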