2023-12-09 b22da3d8526a935aa31e086e63f60ff3246cb61c
kernel/arch/arm64/include/asm/kvm_emulate.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Copyright (C) 2012,2013 - ARM Ltd
  * Author: Marc Zyngier <marc.zyngier@arm.com>
@@ -5,18 +6,6 @@
  * Derived from arch/arm/include/kvm_emulate.h
  * Copyright (C) 2012 - Virtual Open Systems and Columbia University
  * Author: Christoffer Dall <c.dall@virtualopensystems.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
  */
 
 #ifndef __ARM64_KVM_EMULATE_H__
@@ -24,30 +13,35 @@
 
 #include <linux/kvm_host.h>
 
+#include <asm/debug-monitors.h>
 #include <asm/esr.h>
 #include <asm/kvm_arm.h>
 #include <asm/kvm_hyp.h>
-#include <asm/kvm_mmio.h>
 #include <asm/ptrace.h>
 #include <asm/cputype.h>
 #include <asm/virt.h>
 
-unsigned long *vcpu_reg32(const struct kvm_vcpu *vcpu, u8 reg_num);
-unsigned long vcpu_read_spsr32(const struct kvm_vcpu *vcpu);
-void vcpu_write_spsr32(struct kvm_vcpu *vcpu, unsigned long v);
+#define CURRENT_EL_SP_EL0_VECTOR	0x0
+#define CURRENT_EL_SP_ELx_VECTOR	0x200
+#define LOWER_EL_AArch64_VECTOR		0x400
+#define LOWER_EL_AArch32_VECTOR		0x600
+
+enum exception_type {
+	except_type_sync	= 0,
+	except_type_irq		= 0x80,
+	except_type_fiq		= 0x100,
+	except_type_serror	= 0x180,
+};
 
 bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
-void kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr);
+void kvm_skip_instr32(struct kvm_vcpu *vcpu);
 
 void kvm_inject_undefined(struct kvm_vcpu *vcpu);
 void kvm_inject_vabt(struct kvm_vcpu *vcpu);
 void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
 void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
-void kvm_inject_undef32(struct kvm_vcpu *vcpu);
-void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr);
-void kvm_inject_pabt32(struct kvm_vcpu *vcpu, unsigned long addr);
 
-static inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
+static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
 {
 	return !(vcpu->arch.hcr_el2 & HCR_RW);
 }
@@ -63,8 +57,18 @@
 		/* trap error record accesses */
 		vcpu->arch.hcr_el2 |= HCR_TERR;
 	}
-	if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
+
+	if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB)) {
 		vcpu->arch.hcr_el2 |= HCR_FWB;
+	} else {
+		/*
+		 * For non-FWB CPUs, we trap VM ops (HCR_EL2.TVM) until M+C
+		 * get set in SCTLR_EL1 such that we can detect when the guest
+		 * MMU gets turned on and do the necessary cache maintenance
+		 * then.
+		 */
+		vcpu->arch.hcr_el2 |= HCR_TVM;
+	}
 
 	if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features))
 		vcpu->arch.hcr_el2 &= ~HCR_RW;
@@ -76,6 +80,10 @@
 	 */
 	if (!vcpu_el1_is_32bit(vcpu))
 		vcpu->arch.hcr_el2 |= HCR_TID3;
+
+	if (cpus_have_const_cap(ARM64_MISMATCHED_CACHE_TYPE) ||
+	    vcpu_el1_is_32bit(vcpu))
+		vcpu->arch.hcr_el2 |= HCR_TID2;
 }
 
 static inline unsigned long *vcpu_hcr(struct kvm_vcpu *vcpu)
@@ -83,14 +91,30 @@
 	return (unsigned long *)&vcpu->arch.hcr_el2;
 }
 
-static inline void vcpu_clear_wfe_traps(struct kvm_vcpu *vcpu)
+static inline void vcpu_clear_wfx_traps(struct kvm_vcpu *vcpu)
 {
 	vcpu->arch.hcr_el2 &= ~HCR_TWE;
+	if (atomic_read(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vlpi_count) ||
+	    vcpu->kvm->arch.vgic.nassgireq)
+		vcpu->arch.hcr_el2 &= ~HCR_TWI;
+	else
+		vcpu->arch.hcr_el2 |= HCR_TWI;
 }
 
-static inline void vcpu_set_wfe_traps(struct kvm_vcpu *vcpu)
+static inline void vcpu_set_wfx_traps(struct kvm_vcpu *vcpu)
 {
 	vcpu->arch.hcr_el2 |= HCR_TWE;
+	vcpu->arch.hcr_el2 |= HCR_TWI;
+}
+
+static inline void vcpu_ptrauth_enable(struct kvm_vcpu *vcpu)
+{
+	vcpu->arch.hcr_el2 |= (HCR_API | HCR_APK);
+}
+
+static inline void vcpu_ptrauth_disable(struct kvm_vcpu *vcpu)
+{
+	vcpu->arch.hcr_el2 &= ~(HCR_API | HCR_APK);
 }
 
 static inline unsigned long vcpu_get_vsesr(struct kvm_vcpu *vcpu)
@@ -103,56 +127,27 @@
 	vcpu->arch.vsesr_el2 = vsesr;
 }
 
-static inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
+static __always_inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
 {
-	return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pc;
+	return (unsigned long *)&vcpu_gp_regs(vcpu)->pc;
 }
 
-static inline unsigned long *__vcpu_elr_el1(const struct kvm_vcpu *vcpu)
+static __always_inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
 {
-	return (unsigned long *)&vcpu_gp_regs(vcpu)->elr_el1;
+	return (unsigned long *)&vcpu_gp_regs(vcpu)->pstate;
 }
 
-static inline unsigned long vcpu_read_elr_el1(const struct kvm_vcpu *vcpu)
-{
-	if (vcpu->arch.sysregs_loaded_on_cpu)
-		return read_sysreg_el1(elr);
-	else
-		return *__vcpu_elr_el1(vcpu);
-}
-
-static inline void vcpu_write_elr_el1(const struct kvm_vcpu *vcpu, unsigned long v)
-{
-	if (vcpu->arch.sysregs_loaded_on_cpu)
-		write_sysreg_el1(v, elr);
-	else
-		*__vcpu_elr_el1(vcpu) = v;
-}
-
-static inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
-{
-	return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pstate;
-}
-
-static inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
+static __always_inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
 {
 	return !!(*vcpu_cpsr(vcpu) & PSR_MODE32_BIT);
 }
 
-static inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
+static __always_inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
 {
 	if (vcpu_mode_is_32bit(vcpu))
 		return kvm_condition_valid32(vcpu);
 
 	return true;
-}
-
-static inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
-{
-	if (vcpu_mode_is_32bit(vcpu))
-		kvm_skip_instr32(vcpu, is_wide_instr);
-	else
-		*vcpu_pc(vcpu) += 4;
 }
 
 static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
@@ -165,41 +160,17 @@
  * coming from a read of ESR_EL2. Otherwise, it may give the wrong result on
  * AArch32 with banked registers.
  */
-static inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu,
+static __always_inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu,
					 u8 reg_num)
 {
-	return (reg_num == 31) ? 0 : vcpu_gp_regs(vcpu)->regs.regs[reg_num];
+	return (reg_num == 31) ? 0 : vcpu_gp_regs(vcpu)->regs[reg_num];
 }
 
-static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
+static __always_inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
				unsigned long val)
 {
 	if (reg_num != 31)
-		vcpu_gp_regs(vcpu)->regs.regs[reg_num] = val;
-}
-
-static inline unsigned long vcpu_read_spsr(const struct kvm_vcpu *vcpu)
-{
-	if (vcpu_mode_is_32bit(vcpu))
-		return vcpu_read_spsr32(vcpu);
-
-	if (vcpu->arch.sysregs_loaded_on_cpu)
-		return read_sysreg_el1(spsr);
-	else
-		return vcpu_gp_regs(vcpu)->spsr[KVM_SPSR_EL1];
-}
-
-static inline void vcpu_write_spsr(struct kvm_vcpu *vcpu, unsigned long v)
-{
-	if (vcpu_mode_is_32bit(vcpu)) {
-		vcpu_write_spsr32(vcpu, v);
-		return;
-	}
-
-	if (vcpu->arch.sysregs_loaded_on_cpu)
-		write_sysreg_el1(v, spsr);
-	else
-		vcpu_gp_regs(vcpu)->spsr[KVM_SPSR_EL1] = v;
+		vcpu_gp_regs(vcpu)->regs[reg_num] = val;
 }
 
 /*
@@ -248,14 +219,14 @@
 	return mode != PSR_MODE_EL0t;
 }
 
-static inline u32 kvm_vcpu_get_hsr(const struct kvm_vcpu *vcpu)
+static __always_inline u32 kvm_vcpu_get_esr(const struct kvm_vcpu *vcpu)
 {
 	return vcpu->arch.fault.esr_el2;
 }
 
-static inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
+static __always_inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
 {
-	u32 esr = kvm_vcpu_get_hsr(vcpu);
+	u32 esr = kvm_vcpu_get_esr(vcpu);
 
 	if (esr & ESR_ELx_CV)
 		return (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT;
@@ -263,12 +234,12 @@
 	return -1;
 }
 
-static inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu)
+static __always_inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu)
 {
 	return vcpu->arch.fault.far_el2;
 }
 
-static inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu)
+static __always_inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu)
 {
 	return ((phys_addr_t)vcpu->arch.fault.hpfar_el2 & HPFAR_MASK) << 8;
 }
@@ -280,59 +251,64 @@
 
 static inline u32 kvm_vcpu_hvc_get_imm(const struct kvm_vcpu *vcpu)
 {
-	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_xVC_IMM_MASK;
+	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_xVC_IMM_MASK;
 }
 
-static inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
+static __always_inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
 {
-	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_ISV);
+	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_ISV);
+}
+
+static inline unsigned long kvm_vcpu_dabt_iss_nisv_sanitized(const struct kvm_vcpu *vcpu)
+{
+	return kvm_vcpu_get_esr(vcpu) & (ESR_ELx_CM | ESR_ELx_WNR | ESR_ELx_FSC);
 }
 
 static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu)
 {
-	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SSE);
+	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_SSE);
 }
 
 static inline bool kvm_vcpu_dabt_issf(const struct kvm_vcpu *vcpu)
 {
-	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SF);
+	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_SF);
 }
 
-static inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
+static __always_inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
 {
-	return (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
+	return (kvm_vcpu_get_esr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
 }
 
-static inline bool kvm_vcpu_abt_iss1tw(const struct kvm_vcpu *vcpu)
+static __always_inline bool kvm_vcpu_abt_iss1tw(const struct kvm_vcpu *vcpu)
 {
-	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_S1PTW);
+	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_S1PTW);
 }
 
-static inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
+/* Always check for S1PTW *before* using this. */
+static __always_inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
 {
-	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WNR) ||
-		kvm_vcpu_abt_iss1tw(vcpu); /* AF/DBM update */
+	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_WNR;
 }
 
 static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
 {
-	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_CM);
+	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_CM);
 }
 
-static inline int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
+static __always_inline unsigned int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
 {
-	return 1 << ((kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
+	return 1 << ((kvm_vcpu_get_esr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
 }
 
 /* This one is not specific to Data Abort */
-static inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
+static __always_inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
 {
-	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_IL);
+	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_IL);
 }
 
-static inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
+static __always_inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
 {
-	return ESR_ELx_EC(kvm_vcpu_get_hsr(vcpu));
+	return ESR_ELx_EC(kvm_vcpu_get_esr(vcpu));
 }
 
 static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
@@ -345,17 +321,22 @@
 	return kvm_vcpu_trap_is_iabt(vcpu) && !kvm_vcpu_abt_iss1tw(vcpu);
 }
 
-static inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
+static __always_inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
 {
-	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC;
+	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC;
 }
 
-static inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
+static __always_inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
 {
-	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC_TYPE;
+	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC_TYPE;
 }
 
-static inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
+static __always_inline u8 kvm_vcpu_trap_get_fault_level(const struct kvm_vcpu *vcpu)
+{
+	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC_LEVEL;
+}
+
+static __always_inline bool kvm_vcpu_abt_issea(const struct kvm_vcpu *vcpu)
 {
 	switch (kvm_vcpu_trap_get_fault(vcpu)) {
 	case FSC_SEA:
@@ -374,10 +355,21 @@
 	}
 }
 
-static inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
+static __always_inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
 {
-	u32 esr = kvm_vcpu_get_hsr(vcpu);
-	return (esr & ESR_ELx_SYS64_ISS_RT_MASK) >> ESR_ELx_SYS64_ISS_RT_SHIFT;
+	u32 esr = kvm_vcpu_get_esr(vcpu);
+	return ESR_ELx_SYS64_ISS_RT(esr);
+}
+
+static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
+{
+	if (kvm_vcpu_abt_iss1tw(vcpu))
+		return true;
+
+	if (kvm_vcpu_trap_is_iabt(vcpu))
+		return false;
+
+	return kvm_vcpu_dabt_iswrite(vcpu);
 }
 
 static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
@@ -466,4 +458,14 @@
 	return data;	/* Leave LE untouched */
 }
 
+static __always_inline void kvm_incr_pc(struct kvm_vcpu *vcpu)
+{
+	vcpu->arch.flags |= KVM_ARM64_INCREMENT_PC;
+}
+
+static inline bool vcpu_has_feature(struct kvm_vcpu *vcpu, int feature)
+{
+	return test_bit(feature, vcpu->arch.features);
+}
+
 #endif /* __ARM64_KVM_EMULATE_H__ */