2023-12-11 d2ccde1c8e90d38cee87a1b0309ad2827f3fd30d
--- a/kernel/arch/arm64/kvm/hyp/hyp-entry.S
+++ b/kernel/arch/arm64/kvm/hyp/hyp-entry.S
@@ -1,18 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Copyright (C) 2015-2018 - ARM Ltd
  * Author: Marc Zyngier <marc.zyngier@arm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
  */

 #include <linux/arm-smccc.h>
@@ -23,8 +12,8 @@
 #include <asm/cpufeature.h>
 #include <asm/kvm_arm.h>
 #include <asm/kvm_asm.h>
-#include <asm/kvm_mmu.h>
 #include <asm/mmu.h>
+#include <asm/spectre.h>

 .macro save_caller_saved_regs_vect
 	/* x0 and x1 were saved in the vector entry */
@@ -51,78 +40,15 @@
 .endm

 .text
-	.pushsection	.hyp.text, "ax"
-
-.macro do_el2_call
-	/*
-	 * Shuffle the parameters before calling the function
-	 * pointed to in x0. Assumes parameters in x[1,2,3].
-	 */
-	str	lr, [sp, #-16]!
-	mov	lr, x0
-	mov	x0, x1
-	mov	x1, x2
-	mov	x2, x3
-	blr	lr
-	ldr	lr, [sp], #16
-.endm
-
-ENTRY(__vhe_hyp_call)
-	do_el2_call
-	/*
-	 * We used to rely on having an exception return to get
-	 * an implicit isb. In the E2H case, we don't have it anymore.
-	 * rather than changing all the leaf functions, just do it here
-	 * before returning to the rest of the kernel.
-	 */
-	isb
-	ret
-ENDPROC(__vhe_hyp_call)

 el1_sync:				// Guest trapped into EL2

 	mrs	x0, esr_el2
-	lsr	x0, x0, #ESR_ELx_EC_SHIFT
+	ubfx	x0, x0, #ESR_ELx_EC_SHIFT, #ESR_ELx_EC_WIDTH
 	cmp	x0, #ESR_ELx_EC_HVC64
 	ccmp	x0, #ESR_ELx_EC_HVC32, #4, ne
 	b.ne	el1_trap

-	mrs	x1, vttbr_el2		// If vttbr is valid, the guest
-	cbnz	x1, el1_hvc_guest	// called HVC
-
-	/* Here, we're pretty sure the host called HVC. */
-	ldp	x0, x1, [sp], #16
-
-	/* Check for a stub HVC call */
-	cmp	x0, #HVC_STUB_HCALL_NR
-	b.hs	1f
-
-	/*
-	 * Compute the idmap address of __kvm_handle_stub_hvc and
-	 * jump there. Since we use kimage_voffset, do not use the
-	 * HYP VA for __kvm_handle_stub_hvc, but the kernel VA instead
-	 * (by loading it from the constant pool).
-	 *
-	 * Preserve x0-x4, which may contain stub parameters.
-	 */
-	ldr	x5, =__kvm_handle_stub_hvc
-	ldr_l	x6, kimage_voffset
-
-	/* x5 = __pa(x5) */
-	sub	x5, x5, x6
-	br	x5
-
-1:
-	/*
-	 * Perform the EL2 call
-	 */
-	kern_hyp_va	x0
-	do_el2_call
-
-	eret
-	sb
-
-el1_hvc_guest:
 	/*
 	 * Fastest possible path for ARM_SMCCC_ARCH_WORKAROUND_1.
 	 * The workaround has already been applied on the host,
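Note on the hunk above: the exception-class (EC) extraction switches from a plain right shift to ubfx, so only ESR_EL2[31:26] lands in x0 and bits above the EC field of the 64-bit ESR_EL2 value cannot leak into the comparisons that follow. A minimal C sketch of the difference, assuming the usual arm64 values ESR_ELx_EC_SHIFT == 26, ESR_ELx_EC_WIDTH == 6 and ESR_ELx_EC_HVC64 == 0x16; treat the constants as illustrative rather than authoritative.

#include <stdint.h>
#include <stdio.h>

#define ESR_ELx_EC_SHIFT 26
#define ESR_ELx_EC_WIDTH 6
#define ESR_ELx_EC_MASK  (((1ULL << ESR_ELx_EC_WIDTH) - 1) << ESR_ELx_EC_SHIFT)

/* lsr: keeps every bit above the shift, including anything in ESR[63:32] */
static uint64_t ec_by_lsr(uint64_t esr)  { return esr >> ESR_ELx_EC_SHIFT; }

/* ubfx: extracts exactly the 6-bit EC field, nothing else */
static uint64_t ec_by_ubfx(uint64_t esr) { return (esr & ESR_ELx_EC_MASK) >> ESR_ELx_EC_SHIFT; }

int main(void)
{
	/* hypothetical ESR value with a stray bit set above bit 31, EC = HVC64 */
	uint64_t esr = (1ULL << 32) | (0x16ULL << ESR_ELx_EC_SHIFT);

	printf("lsr  -> %#llx\n", (unsigned long long)ec_by_lsr(esr));  /* 0x56: polluted */
	printf("ubfx -> %#llx\n", (unsigned long long)ec_by_ubfx(esr)); /* 0x16: clean EC */
	return 0;
}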
@@ -136,36 +62,11 @@
 	/* ARM_SMCCC_ARCH_WORKAROUND_2 handling */
 	eor	w1, w1, #(ARM_SMCCC_ARCH_WORKAROUND_1 ^ \
 			  ARM_SMCCC_ARCH_WORKAROUND_2)
+	cbz	w1, wa_epilogue
+
+	eor	w1, w1, #(ARM_SMCCC_ARCH_WORKAROUND_2 ^ \
+			  ARM_SMCCC_ARCH_WORKAROUND_3)
 	cbnz	w1, el1_trap
-
-#ifdef CONFIG_ARM64_SSBD
-alternative_cb	arm64_enable_wa2_handling
-	b	wa2_end
-alternative_cb_end
-	get_vcpu_ptr	x2, x0
-	ldr	x0, [x2, #VCPU_WORKAROUND_FLAGS]
-
-	// Sanitize the argument and update the guest flags
-	ldr	x1, [sp, #8]			// Guest's x1
-	clz	w1, w1				// Murphy's device:
-	lsr	w1, w1, #5			// w1 = !!w1 without using
-	eor	w1, w1, #1			// the flags...
-	bfi	x0, x1, #VCPU_WORKAROUND_2_FLAG_SHIFT, #1
-	str	x0, [x2, #VCPU_WORKAROUND_FLAGS]
-
-	/* Check that we actually need to perform the call */
-	hyp_ldr_this_cpu x0, arm64_ssbd_callback_required, x2
-	cbz	x0, wa2_end
-
-	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_2
-	smc	#0
-
-	/* Don't leak data from the SMC call */
-	mov	x3, xzr
-wa2_end:
-	mov	x2, xzr
-	mov	x1, xzr
-#endif

 wa_epilogue:
 	mov	x0, xzr
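Note on the hunk above: at this point w1 holds the guest's SMCCC function ID with ARM_SMCCC_ARCH_WORKAROUND_1 already XORed out by the earlier fast path, so each further eor with a pair of constants (WA1 ^ WA2, then WA2 ^ WA3) re-targets a simple zero test at the next candidate without reloading the ID or needing the flags. A C sketch of the same chained-XOR dispatch; the constants mirror the usual SMCCC IDs and should be read as assumptions, the authoritative values live in include/linux/arm-smccc.h.

#include <stdint.h>
#include <stdio.h>

#define ARM_SMCCC_ARCH_WORKAROUND_1	0x80008000U
#define ARM_SMCCC_ARCH_WORKAROUND_2	0x80007fffU
#define ARM_SMCCC_ARCH_WORKAROUND_3	0x80003fffU

enum action { WA_EPILOGUE, EL1_TRAP };

static enum action dispatch(uint32_t w1 /* guest's function ID */)
{
	/* earlier fast path: fold out WORKAROUND_1 first */
	w1 ^= ARM_SMCCC_ARCH_WORKAROUND_1;
	if (!w1)
		return WA_EPILOGUE;

	/* eor w1, w1, #(WA1 ^ WA2): w1 now holds id ^ WA2 */
	w1 ^= ARM_SMCCC_ARCH_WORKAROUND_1 ^ ARM_SMCCC_ARCH_WORKAROUND_2;
	if (!w1)
		return WA_EPILOGUE;

	/* eor w1, w1, #(WA2 ^ WA3): w1 now holds id ^ WA3 */
	w1 ^= ARM_SMCCC_ARCH_WORKAROUND_2 ^ ARM_SMCCC_ARCH_WORKAROUND_3;
	return w1 ? EL1_TRAP : WA_EPILOGUE;
}

int main(void)
{
	printf("%d %d %d %d\n",
	       dispatch(ARM_SMCCC_ARCH_WORKAROUND_1),	/* 0: handled */
	       dispatch(ARM_SMCCC_ARCH_WORKAROUND_2),	/* 0: handled */
	       dispatch(ARM_SMCCC_ARCH_WORKAROUND_3),	/* 0: handled */
	       dispatch(0x84000000U));			/* 1: el1_trap */
	return 0;
}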
@@ -189,6 +90,10 @@
 	b	__guest_exit

 el2_sync:
+	/* Check for illegal exception return */
+	mrs	x0, spsr_el2
+	tbnz	x0, #20, 1f
+
 	save_caller_saved_regs_vect
 	stp	x29, x30, [sp, #-16]!
 	bl	kvm_unexpected_el2_exception
@@ -196,6 +101,13 @@
 	restore_caller_saved_regs_vect

 	eret
+
+1:
+	/* Let's attempt a recovery from the illegal exception return */
+	get_vcpu_ptr	x1, x0
+	mov	x0, #ARM_EXCEPTION_IL
+	b	__guest_exit
+

 el2_error:
 	save_caller_saved_regs_vect
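Note on the two hunks above: el2_sync now tests bit 20 of SPSR_EL2, the IL (Illegal Execution state) flag, and when it is set hands control to __guest_exit with ARM_EXCEPTION_IL instead of treating the event as an unexpected EL2 exception. A small C sketch of that classification; PSR_IL_BIT is written out here as an assumption rather than taken from the kernel headers.

#include <stdint.h>

#define PSR_IL_BIT	(1UL << 20)	/* SPSR_ELx.IL, assumed from the usual arm64 definition */

enum el2_sync_action { UNEXPECTED_EL2_EXCEPTION, GUEST_EXIT_IL };

/* mirrors "mrs x0, spsr_el2; tbnz x0, #20, 1f" in the hunk above */
static enum el2_sync_action el2_sync_classify(uint64_t spsr_el2)
{
	if (spsr_el2 & PSR_IL_BIT)
		return GUEST_EXIT_IL;		/* 1: -> __guest_exit(ARM_EXCEPTION_IL) */
	return UNEXPECTED_EL2_EXCEPTION;	/* fall through to kvm_unexpected_el2_exception() */
}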
@@ -209,26 +121,11 @@
 	eret
 	sb

-ENTRY(__hyp_do_panic)
-	mov	lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
-		      PSR_MODE_EL1h)
-	msr	spsr_el2, lr
-	ldr	lr, =panic
-	msr	elr_el2, lr
-	eret
-	sb
-ENDPROC(__hyp_do_panic)
-
-ENTRY(__hyp_panic)
-	get_host_ctxt x0, x1
-	b	hyp_panic
-ENDPROC(__hyp_panic)
-
-.macro invalid_vector	label, target = __hyp_panic
+.macro invalid_vector	label, target = __guest_exit_panic
 	.align	2
-\label:
+SYM_CODE_START_LOCAL(\label)
 	b \target
-ENDPROC(\label)
+SYM_CODE_END(\label)
 .endm

 /* None of these should ever happen */
@@ -244,20 +141,36 @@

 	.align 11

+.macro check_preamble_length start, end
+/* kvm_patch_vector_branch() generates code that jumps over the preamble. */
+.if ((\end-\start) != KVM_VECTOR_PREAMBLE)
+	.error "KVM vector preamble length mismatch"
+.endif
+.endm
+
 .macro valid_vect target
 	.align 7
+661:
+	esb
 	stp	x0, x1, [sp, #-16]!
+662:
 	b	\target
+
+check_preamble_length 661b, 662b
 .endm

 .macro invalid_vect target
 	.align 7
+661:
+	nop
+	stp	x0, x1, [sp, #-16]!
+662:
 	b	\target
-	ldp	x0, x1, [sp], #16
-	b	\target
+
+check_preamble_length 661b, 662b
 .endm

-ENTRY(__kvm_hyp_vector)
+SYM_CODE_START(__kvm_hyp_vector)
 	invalid_vect	el2t_sync_invalid	// Synchronous EL2t
 	invalid_vect	el2t_irq_invalid	// IRQ EL2t
 	invalid_vect	el2t_fiq_invalid	// FIQ EL2t
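Note on the hunk above: every vector entry now opens with a fixed two-instruction preamble (esb or nop, then the stp that saves x0/x1), and check_preamble_length makes the assembler fail if the span between the 661/662 labels ever drifts from KVM_VECTOR_PREAMBLE, because kvm_patch_vector_branch() emits branches that deliberately skip it. A hedged C analogue of that build-time check, assuming the usual definition KVM_VECTOR_PREAMBLE == 2 * AARCH64_INSN_SIZE.

#include <stdint.h>

#define AARCH64_INSN_SIZE	4
#define KVM_VECTOR_PREAMBLE	(2 * AARCH64_INSN_SIZE)	/* assumed upstream value */

/* two 32-bit opcodes stand in for the span between the 661: and 662: labels */
struct vect_preamble {
	uint32_t esb_or_nop;
	uint32_t stp_x0_x1;
};

/* mirrors '.if ((\end-\start) != KVM_VECTOR_PREAMBLE) .error ...' at build time */
_Static_assert(sizeof(struct vect_preamble) == KVM_VECTOR_PREAMBLE,
	       "KVM vector preamble length mismatch");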
@@ -277,65 +190,67 @@
 	valid_vect	el1_irq			// IRQ 32-bit EL1
 	invalid_vect	el1_fiq_invalid		// FIQ 32-bit EL1
 	valid_vect	el1_error		// Error 32-bit EL1
-ENDPROC(__kvm_hyp_vector)
+SYM_CODE_END(__kvm_hyp_vector)

-#ifdef CONFIG_KVM_INDIRECT_VECTORS
-.macro hyp_ventry
-	.align 7
-1:	.rept 27
-	nop
-	.endr
-/*
- * The default sequence is to directly branch to the KVM vectors,
- * using the computed offset. This applies for VHE as well as
- * !ARM64_HARDEN_EL2_VECTORS.
- *
- * For ARM64_HARDEN_EL2_VECTORS configurations, this gets replaced
- * with:
- *
- * stp	x0, x1, [sp, #-16]!
- * movz	x0, #(addr & 0xffff)
- * movk	x0, #((addr >> 16) & 0xffff), lsl #16
- * movk	x0, #((addr >> 32) & 0xffff), lsl #32
- * br	x0
- *
- * Where addr = kern_hyp_va(__kvm_hyp_vector) + vector-offset + 4.
- * See kvm_patch_vector_branch for details.
- */
-alternative_cb	kvm_patch_vector_branch
-	b	__kvm_hyp_vector + (1b - 0b)
-	nop
-	nop
-	nop
-	nop
-alternative_cb_end
+.macro spectrev2_smccc_wa1_smc
+	sub	sp, sp, #(8 * 4)
+	stp	x2, x3, [sp, #(8 * 0)]
+	stp	x0, x1, [sp, #(8 * 2)]
+	alternative_cb spectre_bhb_patch_wa3
+	/* Patched to mov WA3 when supported */
+	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_1
+	alternative_cb_end
+	smc	#0
+	ldp	x2, x3, [sp, #(8 * 0)]
+	add	sp, sp, #(8 * 2)
 .endm

-.macro generate_vectors
+.macro hyp_ventry	indirect, spectrev2
+	.align	7
+1:	esb
+	.if \spectrev2 != 0
+	spectrev2_smccc_wa1_smc
+	.else
+	stp	x0, x1, [sp, #-16]!
+	mitigate_spectre_bhb_loop	x0
+	mitigate_spectre_bhb_clear_insn
+	.endif
+	.if \indirect != 0
+	alternative_cb	kvm_patch_vector_branch
+	/*
+	 * For ARM64_SPECTRE_V3A configurations, these NOPs get replaced with:
+	 *
+	 * movz	x0, #(addr & 0xffff)
+	 * movk	x0, #((addr >> 16) & 0xffff), lsl #16
+	 * movk	x0, #((addr >> 32) & 0xffff), lsl #32
+	 * br	x0
+	 *
+	 * Where:
+	 * addr = kern_hyp_va(__kvm_hyp_vector) + vector-offset + KVM_VECTOR_PREAMBLE.
+	 * See kvm_patch_vector_branch for details.
+	 */
+	nop
+	nop
+	nop
+	nop
+	alternative_cb_end
+	.endif
+	b	__kvm_hyp_vector + (1b - 0b + KVM_VECTOR_PREAMBLE)
+.endm
+
+.macro generate_vectors	indirect, spectrev2
 0:
 	.rept 16
-	hyp_ventry
+	hyp_ventry	\indirect, \spectrev2
 	.endr
 	.org 0b + SZ_2K		// Safety measure
 .endm

 	.align	11
-ENTRY(__bp_harden_hyp_vecs_start)
-	.rept BP_HARDEN_EL2_SLOTS
-	generate_vectors
-	.endr
-ENTRY(__bp_harden_hyp_vecs_end)
-
-	.popsection
-
-ENTRY(__smccc_workaround_1_smc_start)
-	sub	sp, sp, #(8 * 4)
-	stp	x2, x3, [sp, #(8 * 0)]
-	stp	x0, x1, [sp, #(8 * 2)]
-	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_1
-	smc	#0
-	ldp	x2, x3, [sp, #(8 * 0)]
-	ldp	x0, x1, [sp, #(8 * 2)]
-	add	sp, sp, #(8 * 4)
-ENTRY(__smccc_workaround_1_smc_end)
-#endif
+SYM_CODE_START(__bp_harden_hyp_vecs)
+	generate_vectors indirect = 0, spectrev2 = 1 // HYP_VECTOR_SPECTRE_DIRECT
+	generate_vectors indirect = 1, spectrev2 = 0 // HYP_VECTOR_INDIRECT
+	generate_vectors indirect = 1, spectrev2 = 1 // HYP_VECTOR_SPECTRE_INDIRECT
+1:	.org __bp_harden_hyp_vecs + __BP_HARDEN_HYP_VECS_SZ
+	.org 1b
+SYM_CODE_END(__bp_harden_hyp_vecs)
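Note on the hunk above: __bp_harden_hyp_vecs packs three 2KB vector tables, one per generate_vectors line, and the closing .org pair asserts that the block is exactly __BP_HARDEN_HYP_VECS_SZ bytes. A hedged C sketch of how a slot's base address falls out of that layout; the enum names and the slot-minus-one indexing follow the usual upstream convention (HYP_VECTOR_DIRECT being plain __kvm_hyp_vector) and are assumptions here, not something this hunk defines.

#include <stdint.h>

#define SZ_2K			0x800UL
#define BP_HARDEN_EL2_SLOTS	4
#define __BP_HARDEN_HYP_VECS_SZ	((BP_HARDEN_EL2_SLOTS - 1) * SZ_2K)	/* 3 slots, 6KB */

enum arm64_hyp_spectre_vector {
	HYP_VECTOR_DIRECT,		/* plain __kvm_hyp_vector, not in this section */
	HYP_VECTOR_SPECTRE_DIRECT,	/* generate_vectors indirect = 0, spectrev2 = 1 */
	HYP_VECTOR_INDIRECT,		/* generate_vectors indirect = 1, spectrev2 = 0 */
	HYP_VECTOR_SPECTRE_INDIRECT,	/* generate_vectors indirect = 1, spectrev2 = 1 */
};

static uintptr_t hyp_vector_base(uintptr_t kvm_hyp_vector,
				 uintptr_t bp_harden_hyp_vecs,
				 enum arm64_hyp_spectre_vector slot)
{
	if (slot == HYP_VECTOR_DIRECT)
		return kvm_hyp_vector;
	/* each hardened slot is one 2KB vector table inside __bp_harden_hyp_vecs */
	return bp_harden_hyp_vecs + (slot - 1) * SZ_2K;
}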