hc
2024-05-10 37f49e37ab4cb5d0bc4c60eb5c6d4dd57db767bb
kernel/arch/arm64/include/asm/kvm_asm.h
@@ -1,91 +1,212 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Copyright (C) 2012,2013 - ARM Ltd
  * Author: Marc Zyngier <marc.zyngier@arm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
  */

 #ifndef __ARM_KVM_ASM_H__
 #define __ARM_KVM_ASM_H__

+#include <asm/hyp_image.h>
 #include <asm/virt.h>
-
-#define VCPU_WORKAROUND_2_FLAG_SHIFT	0
-#define VCPU_WORKAROUND_2_FLAG		(_AC(1, UL) << VCPU_WORKAROUND_2_FLAG_SHIFT)

 #define ARM_EXIT_WITH_SERROR_BIT	31
 #define ARM_EXCEPTION_CODE(x)		((x) & ~(1U << ARM_EXIT_WITH_SERROR_BIT))
+#define ARM_EXCEPTION_IS_TRAP(x)	(ARM_EXCEPTION_CODE((x)) == ARM_EXCEPTION_TRAP)
 #define ARM_SERROR_PENDING(x)		!!((x) & (1U << ARM_EXIT_WITH_SERROR_BIT))

 #define ARM_EXCEPTION_IRQ		0
 #define ARM_EXCEPTION_EL1_SERROR	1
 #define ARM_EXCEPTION_TRAP		2
+#define ARM_EXCEPTION_IL		3
 /* The hyp-stub will return this for any kvm_call_hyp() call */
 #define ARM_EXCEPTION_HYP_GONE		HVC_STUB_ERR
+
+#define kvm_arm_exception_type					\
+	{ARM_EXCEPTION_IRQ,		"IRQ"		},	\
+	{ARM_EXCEPTION_EL1_SERROR,	"SERROR"	},	\
+	{ARM_EXCEPTION_TRAP,		"TRAP"		},	\
+	{ARM_EXCEPTION_HYP_GONE,	"HYP_GONE"	}
+
+/*
+ * Size of the HYP vectors preamble. kvm_patch_vector_branch() generates code
+ * that jumps over this.
+ */
+#define KVM_VECTOR_PREAMBLE	(2 * AARCH64_INSN_SIZE)
+
+#define KVM_HOST_SMCCC_ID(id)						\
+	ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL,				\
+			   ARM_SMCCC_SMC_64,				\
+			   ARM_SMCCC_OWNER_VENDOR_HYP,			\
+			   (id))
+
+#define KVM_HOST_SMCCC_FUNC(name) KVM_HOST_SMCCC_ID(__KVM_HOST_SMCCC_FUNC_##name)
+
+#define __KVM_HOST_SMCCC_FUNC___kvm_hyp_init			0
+#define __KVM_HOST_SMCCC_FUNC___kvm_vcpu_run			1
+#define __KVM_HOST_SMCCC_FUNC___kvm_flush_vm_context		2
+#define __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid_ipa		3
+#define __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid		4
+#define __KVM_HOST_SMCCC_FUNC___kvm_flush_cpu_context		5
+#define __KVM_HOST_SMCCC_FUNC___kvm_timer_set_cntvoff		6
+#define __KVM_HOST_SMCCC_FUNC___kvm_enable_ssbs			7
+#define __KVM_HOST_SMCCC_FUNC___vgic_v3_get_gic_config		8
+#define __KVM_HOST_SMCCC_FUNC___vgic_v3_read_vmcr		9
+#define __KVM_HOST_SMCCC_FUNC___vgic_v3_write_vmcr		10
+#define __KVM_HOST_SMCCC_FUNC___vgic_v3_init_lrs		11
+#define __KVM_HOST_SMCCC_FUNC___kvm_get_mdcr_el2		12
+#define __KVM_HOST_SMCCC_FUNC___vgic_v3_save_aprs		13
+#define __KVM_HOST_SMCCC_FUNC___vgic_v3_restore_aprs		14
+#define __KVM_HOST_SMCCC_FUNC___pkvm_init			15
+#define __KVM_HOST_SMCCC_FUNC___pkvm_create_mappings		16
+#define __KVM_HOST_SMCCC_FUNC___pkvm_create_private_mapping	17
+#define __KVM_HOST_SMCCC_FUNC___pkvm_cpu_set_vector		18
+#define __KVM_HOST_SMCCC_FUNC___pkvm_prot_finalize		19
+#define __KVM_HOST_SMCCC_FUNC___pkvm_mark_hyp			20

 #ifndef __ASSEMBLY__

 #include <linux/mm.h>

-/* Translate a kernel address of @sym into its equivalent linear mapping */
-#define kvm_ksym_ref(sym)						\
+#define DECLARE_KVM_VHE_SYM(sym)	extern char sym[]
+#define DECLARE_KVM_NVHE_SYM(sym)	extern char kvm_nvhe_sym(sym)[]
+
+/*
+ * Define a pair of symbols sharing the same name but one defined in
+ * VHE and the other in nVHE hyp implementations.
+ */
+#define DECLARE_KVM_HYP_SYM(sym)		\
+	DECLARE_KVM_VHE_SYM(sym);		\
+	DECLARE_KVM_NVHE_SYM(sym)
+
+#define DECLARE_KVM_VHE_PER_CPU(type, sym)	\
+	DECLARE_PER_CPU(type, sym)
+#define DECLARE_KVM_NVHE_PER_CPU(type, sym)	\
+	DECLARE_PER_CPU(type, kvm_nvhe_sym(sym))
+
+#define DECLARE_KVM_HYP_PER_CPU(type, sym)	\
+	DECLARE_KVM_VHE_PER_CPU(type, sym);	\
+	DECLARE_KVM_NVHE_PER_CPU(type, sym)
+
+/*
+ * Compute pointer to a symbol defined in nVHE percpu region.
+ * Returns NULL if percpu memory has not been allocated yet.
+ */
+#define this_cpu_ptr_nvhe_sym(sym)	per_cpu_ptr_nvhe_sym(sym, smp_processor_id())
+#define per_cpu_ptr_nvhe_sym(sym, cpu)						\
+	({									\
+		unsigned long base, off;					\
+		base = kvm_arm_hyp_percpu_base[cpu];				\
+		off = (unsigned long)&CHOOSE_NVHE_SYM(sym) -			\
+		      (unsigned long)&CHOOSE_NVHE_SYM(__per_cpu_start);		\
+		base ? (typeof(CHOOSE_NVHE_SYM(sym))*)(base + off) : NULL;	\
+	})
+
+#if defined(__KVM_NVHE_HYPERVISOR__)
+
+#define CHOOSE_NVHE_SYM(sym)	sym
+#define CHOOSE_HYP_SYM(sym)	CHOOSE_NVHE_SYM(sym)
+
+/* The nVHE hypervisor shouldn't even try to access VHE symbols */
+extern void *__nvhe_undefined_symbol;
+#define CHOOSE_VHE_SYM(sym)		__nvhe_undefined_symbol
+#define this_cpu_ptr_hyp_sym(sym)	(&__nvhe_undefined_symbol)
+#define per_cpu_ptr_hyp_sym(sym, cpu)	(&__nvhe_undefined_symbol)
+
+#elif defined(__KVM_VHE_HYPERVISOR__)
+
+#define CHOOSE_VHE_SYM(sym)	sym
+#define CHOOSE_HYP_SYM(sym)	CHOOSE_VHE_SYM(sym)
+
+/* The VHE hypervisor shouldn't even try to access nVHE symbols */
+extern void *__vhe_undefined_symbol;
+#define CHOOSE_NVHE_SYM(sym)		__vhe_undefined_symbol
+#define this_cpu_ptr_hyp_sym(sym)	(&__vhe_undefined_symbol)
+#define per_cpu_ptr_hyp_sym(sym, cpu)	(&__vhe_undefined_symbol)
+
+#else
+
+/*
+ * BIG FAT WARNINGS:
+ *
+ * - Don't be tempted to change the following is_kernel_in_hyp_mode()
+ *   to has_vhe(). has_vhe() is implemented as a *final* capability,
+ *   while this is used early at boot time, when the capabilities are
+ *   not final yet....
+ *
+ * - Don't let the nVHE hypervisor have access to this, as it will
+ *   pick the *wrong* symbol (yes, it runs at EL2...).
+ */
+#define CHOOSE_HYP_SYM(sym)		(is_kernel_in_hyp_mode()	\
+					   ? CHOOSE_VHE_SYM(sym)	\
+					   : CHOOSE_NVHE_SYM(sym))
+
+#define this_cpu_ptr_hyp_sym(sym)	(is_kernel_in_hyp_mode()	\
+					   ? this_cpu_ptr(&sym)		\
+					   : this_cpu_ptr_nvhe_sym(sym))
+
+#define per_cpu_ptr_hyp_sym(sym, cpu)	(is_kernel_in_hyp_mode()	\
+					   ? per_cpu_ptr(&sym, cpu)	\
+					   : per_cpu_ptr_nvhe_sym(sym, cpu))
+
+#define CHOOSE_VHE_SYM(sym)	sym
+#define CHOOSE_NVHE_SYM(sym)	kvm_nvhe_sym(sym)
+
+#endif
+
+struct kvm_nvhe_init_params {
+	unsigned long mair_el2;
+	unsigned long tcr_el2;
+	unsigned long tpidr_el2;
+	unsigned long stack_hyp_va;
+	phys_addr_t pgd_pa;
+	unsigned long hcr_el2;
+	unsigned long vttbr;
+	unsigned long vtcr;
+};
+
+/* Translate a kernel address @ptr into its equivalent linear mapping */
+#define kvm_ksym_ref(ptr)						\
 	({								\
-		void *val = &sym;					\
+		void *val = (ptr);					\
 		if (!is_kernel_in_hyp_mode())				\
-			val = lm_alias(&sym);				\
+			val = lm_alias((ptr));				\
 		val;							\
 	 })
+#define kvm_ksym_ref_nvhe(sym)	kvm_ksym_ref(__va_function(kvm_nvhe_sym(sym)))

 struct kvm;
 struct kvm_vcpu;
+struct kvm_s2_mmu;

-extern char __kvm_hyp_init[];
-extern char __kvm_hyp_init_end[];
+DECLARE_KVM_NVHE_SYM(__kvm_hyp_init);
+DECLARE_KVM_HYP_SYM(__kvm_hyp_vector);
+#define __kvm_hyp_init		CHOOSE_NVHE_SYM(__kvm_hyp_init)
+#define __kvm_hyp_vector	CHOOSE_HYP_SYM(__kvm_hyp_vector)

-extern char __kvm_hyp_vector[];
+extern unsigned long kvm_arm_hyp_percpu_base[NR_CPUS];
+DECLARE_KVM_NVHE_SYM(__per_cpu_start);
+DECLARE_KVM_NVHE_SYM(__per_cpu_end);
+
+DECLARE_KVM_HYP_SYM(__bp_harden_hyp_vecs);
+#define __bp_harden_hyp_vecs	CHOOSE_HYP_SYM(__bp_harden_hyp_vecs)

 extern void __kvm_flush_vm_context(void);
-extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
-extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
-extern void __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu);
+extern void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu);
+extern void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa,
+				     int level);
+extern void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu);

-extern void __kvm_timer_set_cntvoff(u32 cntvoff_low, u32 cntvoff_high);
+extern void __kvm_timer_set_cntvoff(u64 cntvoff);

-extern int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu);
+extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);

-extern int __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu);
-
-extern u64 __vgic_v3_get_ich_vtr_el2(void);
+extern u64 __vgic_v3_get_gic_config(void);
 extern u64 __vgic_v3_read_vmcr(void);
 extern void __vgic_v3_write_vmcr(u32 vmcr);
 extern void __vgic_v3_init_lrs(void);

 extern u32 __kvm_get_mdcr_el2(void);
-
-extern u32 __init_stage2_translation(void);
-
-/* Home-grown __this_cpu_{ptr,read} variants that always work at HYP */
-#define __hyp_this_cpu_ptr(sym)						\
-	({								\
-		void *__ptr = hyp_symbol_addr(sym);			\
-		__ptr += read_sysreg(tpidr_el2);			\
-		(typeof(&sym))__ptr;					\
-	 })
-
-#define __hyp_this_cpu_read(sym)					\
-	({								\
-		*__hyp_this_cpu_ptr(sym);				\
-	 })

 #define __KVM_EXTABLE(from, to)						\
 	"	.pushsection	__kvm_ex_table, \"a\"\n"		\
@@ -117,26 +238,24 @@

 #else /* __ASSEMBLY__ */

-.macro hyp_adr_this_cpu reg, sym, tmp
-	adr_l	\reg, \sym
-	mrs	\tmp, tpidr_el2
-	add	\reg, \reg, \tmp
-.endm
-
-.macro hyp_ldr_this_cpu reg, sym, tmp
-	adr_l	\reg, \sym
-	mrs	\tmp, tpidr_el2
-	ldr	\reg, [\reg, \tmp]
-.endm
-
 .macro get_host_ctxt reg, tmp
-	hyp_adr_this_cpu \reg, kvm_host_cpu_state, \tmp
+	adr_this_cpu \reg, kvm_host_data, \tmp
+	add	\reg, \reg, #HOST_DATA_CONTEXT
 .endm

 .macro get_vcpu_ptr vcpu, ctxt
 	get_host_ctxt \ctxt, \vcpu
 	ldr	\vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
-	kern_hyp_va	\vcpu
+.endm
+
+.macro get_loaded_vcpu vcpu, ctxt
+	adr_this_cpu \ctxt, kvm_hyp_ctxt, \vcpu
+	ldr	\vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
+.endm
+
+.macro set_loaded_vcpu vcpu, ctxt, tmp
+	adr_this_cpu \ctxt, kvm_hyp_ctxt, \tmp
+	str	\vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
 .endm

 /*
154273 .popsection
155274 .endm
156275
276
+#define CPU_XREG_OFFSET(x) (CPU_USER_PT_REGS + 8*x)
277
+#define CPU_LR_OFFSET CPU_XREG_OFFSET(30)
278
+#define CPU_SP_EL0_OFFSET (CPU_LR_OFFSET + 8)
279
+
280
+/*
281
+ * We treat x18 as callee-saved as the host may use it as a platform
282
+ * register (e.g. for shadow call stack).
283
+ */
284
+.macro save_callee_saved_regs ctxt
285
+ str x18, [\ctxt, #CPU_XREG_OFFSET(18)]
286
+ stp x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
287
+ stp x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
288
+ stp x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
289
+ stp x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)]
290
+ stp x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)]
291
+ stp x29, lr, [\ctxt, #CPU_XREG_OFFSET(29)]
292
+.endm
293
+
294
+.macro restore_callee_saved_regs ctxt
295
+ // We require \ctxt is not x18-x28
296
+ ldr x18, [\ctxt, #CPU_XREG_OFFSET(18)]
297
+ ldp x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
298
+ ldp x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
299
+ ldp x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
300
+ ldp x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)]
301
+ ldp x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)]
302
+ ldp x29, lr, [\ctxt, #CPU_XREG_OFFSET(29)]
303
+.endm
304
+
305
+.macro save_sp_el0 ctxt, tmp
306
+ mrs \tmp, sp_el0
307
+ str \tmp, [\ctxt, #CPU_SP_EL0_OFFSET]
308
+.endm
309
+
310
+.macro restore_sp_el0 ctxt, tmp
311
+ ldr \tmp, [\ctxt, #CPU_SP_EL0_OFFSET]
312
+ msr sp_el0, \tmp
313
+.endm
314
+
157315 #endif
158316
159317 #endif /* __ARM_KVM_ASM_H__ */
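Note (illustrative only, not part of the patch): the __KVM_HOST_SMCCC_FUNC_* indices above become SMCCC function IDs via KVM_HOST_SMCCC_FUNC(), and the host reaches the nVHE hypervisor with an HVC carrying that ID. A minimal sketch follows, assuming the arm_smccc_1_1_hvc() helper from <linux/arm-smccc.h>; the function below is hypothetical and stands in for the kernel's own wrapper around this pattern.

	/* Hypothetical sketch of a host-to-nVHE hypercall dispatch. */
	static int example_run_vcpu(struct kvm_vcpu *vcpu)
	{
		struct arm_smccc_res res;

		/* Issue the HVC with the function ID built from the table above. */
		arm_smccc_1_1_hvc(KVM_HOST_SMCCC_FUNC(__kvm_vcpu_run), vcpu, &res);

		/*
		 * res.a0 carries the SMCCC status; the hypervisor's return value
		 * (an ARM_EXCEPTION_* exit code here) comes back in res.a1.
		 */
		return res.a1;
	}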