| old | new | diff |
|---|---|---|
| .. | .. | .. |
| | 1 | +/* SPDX-License-Identifier: GPL-2.0-only */ |
| 1 | 2 | /* |
| 2 | 3 | * Copyright (C) 2015-2018 - ARM Ltd |
| 3 | 4 | * Author: Marc Zyngier <marc.zyngier@arm.com> |
| 4 | | - * |
| 5 | | - * This program is free software; you can redistribute it and/or modify |
| 6 | | - * it under the terms of the GNU General Public License version 2 as |
| 7 | | - * published by the Free Software Foundation. |
| 8 | | - * |
| 9 | | - * This program is distributed in the hope that it will be useful, |
| 10 | | - * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| 11 | | - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| 12 | | - * GNU General Public License for more details. |
| 13 | | - * |
| 14 | | - * You should have received a copy of the GNU General Public License |
| 15 | | - * along with this program. If not, see <http://www.gnu.org/licenses/>. |
| 16 | 5 | */ |
| 17 | 6 | |
| 18 | 7 | #include <linux/arm-smccc.h> |
| .. | .. | .. |
| 23 | 12 | #include <asm/cpufeature.h> |
| 24 | 13 | #include <asm/kvm_arm.h> |
| 25 | 14 | #include <asm/kvm_asm.h> |
| 26 | | -#include <asm/kvm_mmu.h> |
| 27 | 15 | #include <asm/mmu.h> |
| | 16 | +#include <asm/spectre.h> |
| 28 | 17 | |
| 29 | 18 | .macro save_caller_saved_regs_vect |
| 30 | 19 | /* x0 and x1 were saved in the vector entry */ |
| .. | .. | .. |
| 51 | 40 | .endm |
| 52 | 41 | |
| 53 | 42 | .text |
| 54 | | - .pushsection .hyp.text, "ax" |
| 55 | | - |
| 56 | | -.macro do_el2_call |
| 57 | | - /* |
| 58 | | - * Shuffle the parameters before calling the function |
| 59 | | - * pointed to in x0. Assumes parameters in x[1,2,3]. |
| 60 | | - */ |
| 61 | | - str lr, [sp, #-16]! |
| 62 | | - mov lr, x0 |
| 63 | | - mov x0, x1 |
| 64 | | - mov x1, x2 |
| 65 | | - mov x2, x3 |
| 66 | | - blr lr |
| 67 | | - ldr lr, [sp], #16 |
| 68 | | -.endm |
| 69 | | - |
| 70 | | -ENTRY(__vhe_hyp_call) |
| 71 | | - do_el2_call |
| 72 | | - /* |
| 73 | | - * We used to rely on having an exception return to get |
| 74 | | - * an implicit isb. In the E2H case, we don't have it anymore. |
| 75 | | - * rather than changing all the leaf functions, just do it here |
| 76 | | - * before returning to the rest of the kernel. |
| 77 | | - */ |
| 78 | | - isb |
| 79 | | - ret |
| 80 | | -ENDPROC(__vhe_hyp_call) |
| 81 | 43 | |
| 82 | 44 | el1_sync: // Guest trapped into EL2 |
| 83 | 45 | |
| 84 | 46 | mrs x0, esr_el2 |
| 85 | | - lsr x0, x0, #ESR_ELx_EC_SHIFT |
| | 47 | + ubfx x0, x0, #ESR_ELx_EC_SHIFT, #ESR_ELx_EC_WIDTH |
| 86 | 48 | cmp x0, #ESR_ELx_EC_HVC64 |
| 87 | 49 | ccmp x0, #ESR_ELx_EC_HVC32, #4, ne |
| 88 | 50 | b.ne el1_trap |
| 89 | 51 | |
| 90 | | - mrs x1, vttbr_el2 // If vttbr is valid, the guest |
| 91 | | - cbnz x1, el1_hvc_guest // called HVC |
| 92 | | - |
| 93 | | - /* Here, we're pretty sure the host called HVC. */ |
| 94 | | - ldp x0, x1, [sp], #16 |
| 95 | | - |
| 96 | | - /* Check for a stub HVC call */ |
| 97 | | - cmp x0, #HVC_STUB_HCALL_NR |
| 98 | | - b.hs 1f |
| 99 | | - |
| 100 | | - /* |
| 101 | | - * Compute the idmap address of __kvm_handle_stub_hvc and |
| 102 | | - * jump there. Since we use kimage_voffset, do not use the |
| 103 | | - * HYP VA for __kvm_handle_stub_hvc, but the kernel VA instead |
| 104 | | - * (by loading it from the constant pool). |
| 105 | | - * |
| 106 | | - * Preserve x0-x4, which may contain stub parameters. |
| 107 | | - */ |
| 108 | | - ldr x5, =__kvm_handle_stub_hvc |
| 109 | | - ldr_l x6, kimage_voffset |
| 110 | | - |
| 111 | | - /* x5 = __pa(x5) */ |
| 112 | | - sub x5, x5, x6 |
| 113 | | - br x5 |
| 114 | | - |
| 115 | | -1: |
| 116 | | - /* |
| 117 | | - * Perform the EL2 call |
| 118 | | - */ |
| 119 | | - kern_hyp_va x0 |
| 120 | | - do_el2_call |
| 121 | | - |
| 122 | | - eret |
| 123 | | - sb |
| 124 | | - |
| 125 | | -el1_hvc_guest: |
| 126 | 52 | /* |
| 127 | 53 | * Fastest possible path for ARM_SMCCC_ARCH_WORKAROUND_1. |
| 128 | 54 | * The workaround has already been applied on the host, |
| .. | .. | .. |
| 136 | 62 | /* ARM_SMCCC_ARCH_WORKAROUND_2 handling */ |
| 137 | 63 | eor w1, w1, #(ARM_SMCCC_ARCH_WORKAROUND_1 ^ \ |
| 138 | 64 | ARM_SMCCC_ARCH_WORKAROUND_2) |
| | 65 | + cbz w1, wa_epilogue |
| | 66 | + |
| | 67 | + eor w1, w1, #(ARM_SMCCC_ARCH_WORKAROUND_2 ^ \ |
| | 68 | + ARM_SMCCC_ARCH_WORKAROUND_3) |
| 139 | 69 | cbnz w1, el1_trap |
| 140 | | - |
| 141 | | -#ifdef CONFIG_ARM64_SSBD |
| 142 | | -alternative_cb arm64_enable_wa2_handling |
| 143 | | - b wa2_end |
| 144 | | -alternative_cb_end |
| 145 | | - get_vcpu_ptr x2, x0 |
| 146 | | - ldr x0, [x2, #VCPU_WORKAROUND_FLAGS] |
| 147 | | - |
| 148 | | - // Sanitize the argument and update the guest flags |
| 149 | | - ldr x1, [sp, #8] // Guest's x1 |
| 150 | | - clz w1, w1 // Murphy's device: |
| 151 | | - lsr w1, w1, #5 // w1 = !!w1 without using |
| 152 | | - eor w1, w1, #1 // the flags... |
| 153 | | - bfi x0, x1, #VCPU_WORKAROUND_2_FLAG_SHIFT, #1 |
| 154 | | - str x0, [x2, #VCPU_WORKAROUND_FLAGS] |
| 155 | | - |
| 156 | | - /* Check that we actually need to perform the call */ |
| 157 | | - hyp_ldr_this_cpu x0, arm64_ssbd_callback_required, x2 |
| 158 | | - cbz x0, wa2_end |
| 159 | | - |
| 160 | | - mov w0, #ARM_SMCCC_ARCH_WORKAROUND_2 |
| 161 | | - smc #0 |
| 162 | | - |
| 163 | | - /* Don't leak data from the SMC call */ |
| 164 | | - mov x3, xzr |
| 165 | | -wa2_end: |
| 166 | | - mov x2, xzr |
| 167 | | - mov x1, xzr |
| 168 | | -#endif |
| 169 | 70 | |
| 170 | 71 | wa_epilogue: |
| 171 | 72 | mov x0, xzr |
| .. | .. | .. |
| 189 | 90 | b __guest_exit |
| 190 | 91 | |
| 191 | 92 | el2_sync: |
| | 93 | + /* Check for illegal exception return */ |
| | 94 | + mrs x0, spsr_el2 |
| | 95 | + tbnz x0, #20, 1f |
| | 96 | + |
| 192 | 97 | save_caller_saved_regs_vect |
| 193 | 98 | stp x29, x30, [sp, #-16]! |
| 194 | 99 | bl kvm_unexpected_el2_exception |
| .. | .. | .. |
| 196 | 101 | restore_caller_saved_regs_vect |
| 197 | 102 | |
| 198 | 103 | eret |
| | 104 | + |
| | 105 | +1: |
| | 106 | + /* Let's attempt a recovery from the illegal exception return */ |
| | 107 | + get_vcpu_ptr x1, x0 |
| | 108 | + mov x0, #ARM_EXCEPTION_IL |
| | 109 | + b __guest_exit |
| | 110 | + |
| 199 | 111 | |
| 200 | 112 | el2_error: |
| 201 | 113 | save_caller_saved_regs_vect |
| .. | .. | .. |
| 209 | 121 | eret |
| 210 | 122 | sb |
| 211 | 123 | |
| 212 | | -ENTRY(__hyp_do_panic) |
| 213 | | - mov lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\ |
| 214 | | - PSR_MODE_EL1h) |
| 215 | | - msr spsr_el2, lr |
| 216 | | - ldr lr, =panic |
| 217 | | - msr elr_el2, lr |
| 218 | | - eret |
| 219 | | - sb |
| 220 | | -ENDPROC(__hyp_do_panic) |
| 221 | | - |
| 222 | | -ENTRY(__hyp_panic) |
| 223 | | - get_host_ctxt x0, x1 |
| 224 | | - b hyp_panic |
| 225 | | -ENDPROC(__hyp_panic) |
| 226 | | - |
| 227 | | -.macro invalid_vector label, target = __hyp_panic |
| | 124 | +.macro invalid_vector label, target = __guest_exit_panic |
| 228 | 125 | .align 2 |
| 229 | | -\label: |
| | 126 | +SYM_CODE_START_LOCAL(\label) |
| 230 | 127 | b \target |
| 231 | | -ENDPROC(\label) |
| | 128 | +SYM_CODE_END(\label) |
| 232 | 129 | .endm |
| 233 | 130 | |
| 234 | 131 | /* None of these should ever happen */ |
| .. | .. | .. |
| 244 | 141 | |
| 245 | 142 | .align 11 |
| 246 | 143 | |
| | 144 | +.macro check_preamble_length start, end |
| | 145 | +/* kvm_patch_vector_branch() generates code that jumps over the preamble. */ |
| | 146 | +.if ((\end-\start) != KVM_VECTOR_PREAMBLE) |
| | 147 | + .error "KVM vector preamble length mismatch" |
| | 148 | +.endif |
| | 149 | +.endm |
| | 150 | + |
| 247 | 151 | .macro valid_vect target |
| 248 | 152 | .align 7 |
| | 153 | +661: |
| | 154 | + esb |
| 249 | 155 | stp x0, x1, [sp, #-16]! |
| | 156 | +662: |
| 250 | 157 | b \target |
| | 158 | + |
| | 159 | +check_preamble_length 661b, 662b |
| 251 | 160 | .endm |
| 252 | 161 | |
| 253 | 162 | .macro invalid_vect target |
| 254 | 163 | .align 7 |
| | 164 | +661: |
| | 165 | + nop |
| | 166 | + stp x0, x1, [sp, #-16]! |
| | 167 | +662: |
| 255 | 168 | b \target |
| 256 | | - ldp x0, x1, [sp], #16 |
| 257 | | - b \target |
| | 169 | + |
| | 170 | +check_preamble_length 661b, 662b |
| 258 | 171 | .endm |
| 259 | 172 | |
| 260 | | -ENTRY(__kvm_hyp_vector) |
| | 173 | +SYM_CODE_START(__kvm_hyp_vector) |
| 261 | 174 | invalid_vect el2t_sync_invalid // Synchronous EL2t |
| 262 | 175 | invalid_vect el2t_irq_invalid // IRQ EL2t |
| 263 | 176 | invalid_vect el2t_fiq_invalid // FIQ EL2t |
| .. | .. | .. |
| 277 | 190 | valid_vect el1_irq // IRQ 32-bit EL1 |
| 278 | 191 | invalid_vect el1_fiq_invalid // FIQ 32-bit EL1 |
| 279 | 192 | valid_vect el1_error // Error 32-bit EL1 |
| 280 | | -ENDPROC(__kvm_hyp_vector) |
| | 193 | +SYM_CODE_END(__kvm_hyp_vector) |
| 281 | 194 | |
| 282 | | -#ifdef CONFIG_KVM_INDIRECT_VECTORS |
| 283 | | -.macro hyp_ventry |
| 284 | | - .align 7 |
| 285 | | -1: .rept 27 |
| 286 | | - nop |
| 287 | | - .endr |
| 288 | | -/* |
| 289 | | - * The default sequence is to directly branch to the KVM vectors, |
| 290 | | - * using the computed offset. This applies for VHE as well as |
| 291 | | - * !ARM64_HARDEN_EL2_VECTORS. |
| 292 | | - * |
| 293 | | - * For ARM64_HARDEN_EL2_VECTORS configurations, this gets replaced |
| 294 | | - * with: |
| 295 | | - * |
| 296 | | - * stp x0, x1, [sp, #-16]! |
| 297 | | - * movz x0, #(addr & 0xffff) |
| 298 | | - * movk x0, #((addr >> 16) & 0xffff), lsl #16 |
| 299 | | - * movk x0, #((addr >> 32) & 0xffff), lsl #32 |
| 300 | | - * br x0 |
| 301 | | - * |
| 302 | | - * Where addr = kern_hyp_va(__kvm_hyp_vector) + vector-offset + 4. |
| 303 | | - * See kvm_patch_vector_branch for details. |
| 304 | | - */ |
| 305 | | -alternative_cb kvm_patch_vector_branch |
| 306 | | - b __kvm_hyp_vector + (1b - 0b) |
| 307 | | - nop |
| 308 | | - nop |
| 309 | | - nop |
| 310 | | - nop |
| 311 | | -alternative_cb_end |
| | 195 | +.macro spectrev2_smccc_wa1_smc |
| | 196 | + sub sp, sp, #(8 * 4) |
| | 197 | + stp x2, x3, [sp, #(8 * 0)] |
| | 198 | + stp x0, x1, [sp, #(8 * 2)] |
| | 199 | + alternative_cb spectre_bhb_patch_wa3 |
| | 200 | + /* Patched to mov WA3 when supported */ |
| | 201 | + mov w0, #ARM_SMCCC_ARCH_WORKAROUND_1 |
| | 202 | + alternative_cb_end |
| | 203 | + smc #0 |
| | 204 | + ldp x2, x3, [sp, #(8 * 0)] |
| | 205 | + add sp, sp, #(8 * 2) |
| 312 | 206 | .endm |
| 313 | 207 | |
| 314 | | -.macro generate_vectors |
| | 208 | +.macro hyp_ventry indirect, spectrev2 |
| | 209 | + .align 7 |
| | 210 | +1: esb |
| | 211 | + .if \spectrev2 != 0 |
| | 212 | + spectrev2_smccc_wa1_smc |
| | 213 | + .else |
| | 214 | + stp x0, x1, [sp, #-16]! |
| | 215 | + mitigate_spectre_bhb_loop x0 |
| | 216 | + mitigate_spectre_bhb_clear_insn |
| | 217 | + .endif |
| | 218 | + .if \indirect != 0 |
| | 219 | + alternative_cb kvm_patch_vector_branch |
| | 220 | + /* |
| | 221 | + * For ARM64_SPECTRE_V3A configurations, these NOPs get replaced with: |
| | 222 | + * |
| | 223 | + * movz x0, #(addr & 0xffff) |
| | 224 | + * movk x0, #((addr >> 16) & 0xffff), lsl #16 |
| | 225 | + * movk x0, #((addr >> 32) & 0xffff), lsl #32 |
| | 226 | + * br x0 |
| | 227 | + * |
| | 228 | + * Where: |
| | 229 | + * addr = kern_hyp_va(__kvm_hyp_vector) + vector-offset + KVM_VECTOR_PREAMBLE. |
| | 230 | + * See kvm_patch_vector_branch for details. |
| | 231 | + */ |
| | 232 | + nop |
| | 233 | + nop |
| | 234 | + nop |
| | 235 | + nop |
| | 236 | + alternative_cb_end |
| | 237 | + .endif |
| | 238 | + b __kvm_hyp_vector + (1b - 0b + KVM_VECTOR_PREAMBLE) |
| | 239 | +.endm |
| | 240 | + |
| | 241 | +.macro generate_vectors indirect, spectrev2 |
| 315 | 242 | 0: |
| 316 | 243 | .rept 16 |
| 317 | | - hyp_ventry |
| | 244 | + hyp_ventry \indirect, \spectrev2 |
| 318 | 245 | .endr |
| 319 | 246 | .org 0b + SZ_2K // Safety measure |
| 320 | 247 | .endm |
| 321 | 248 | |
| 322 | 249 | .align 11 |
| 323 | | -ENTRY(__bp_harden_hyp_vecs_start) |
| 324 | | - .rept BP_HARDEN_EL2_SLOTS |
| 325 | | - generate_vectors |
| 326 | | - .endr |
| 327 | | -ENTRY(__bp_harden_hyp_vecs_end) |
| 328 | | - |
| 329 | | - .popsection |
| 330 | | - |
| 331 | | -ENTRY(__smccc_workaround_1_smc_start) |
| 332 | | - sub sp, sp, #(8 * 4) |
| 333 | | - stp x2, x3, [sp, #(8 * 0)] |
| 334 | | - stp x0, x1, [sp, #(8 * 2)] |
| 335 | | - mov w0, #ARM_SMCCC_ARCH_WORKAROUND_1 |
| 336 | | - smc #0 |
| 337 | | - ldp x2, x3, [sp, #(8 * 0)] |
| 338 | | - ldp x0, x1, [sp, #(8 * 2)] |
| 339 | | - add sp, sp, #(8 * 4) |
| 340 | | -ENTRY(__smccc_workaround_1_smc_end) |
| 341 | | -#endif |
| | 250 | +SYM_CODE_START(__bp_harden_hyp_vecs) |
| | 251 | + generate_vectors indirect = 0, spectrev2 = 1 // HYP_VECTOR_SPECTRE_DIRECT |
| | 252 | + generate_vectors indirect = 1, spectrev2 = 0 // HYP_VECTOR_INDIRECT |
| | 253 | + generate_vectors indirect = 1, spectrev2 = 1 // HYP_VECTOR_SPECTRE_INDIRECT |
| | 254 | +1: .org __bp_harden_hyp_vecs + __BP_HARDEN_HYP_VECS_SZ |
| | 255 | + .org 1b |
| | 256 | +SYM_CODE_END(__bp_harden_hyp_vecs) |
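A quick way to sanity-check the layout implied by the new vectors is to work through the sizes: each `hyp_ventry` entry is aligned to 128 bytes (`.align 7`), `generate_vectors` emits 16 of them, and `.org 0b + SZ_2K` traps any overflow, so every generated slot is exactly 2 KiB, while `check_preamble_length` insists the `esb` + `stp` preamble is exactly `KVM_VECTOR_PREAMBLE` bytes. The sketch below restates that arithmetic as compile-time checks; it is illustrative only, and the constant values (`AARCH64_INSN_SIZE`, `KVM_VECTOR_PREAMBLE`, `SZ_2K`) are assumptions inferred from the diff rather than taken from kernel headers.

```c
#include <assert.h>

/* Values assumed from the diff above, not pulled from kernel headers. */
#define AARCH64_INSN_SIZE    4                        /* every A64 instruction is 4 bytes */
#define KVM_VECTOR_PREAMBLE  (2 * AARCH64_INSN_SIZE)  /* esb + stp x0, x1, [sp, #-16]!    */
#define SZ_2K                0x800
#define VECTOR_STRIDE        (1 << 7)                 /* .align 7 => 128-byte entries     */
#define NR_VECTORS           16                       /* .rept 16 in generate_vectors     */

int main(void)
{
	/* The preamble jumped over by kvm_patch_vector_branch() is two instructions long. */
	assert(KVM_VECTOR_PREAMBLE == 8);

	/* 16 entries of 128 bytes fill one 2 KiB slot, matching ".org 0b + SZ_2K". */
	assert(NR_VECTORS * VECTOR_STRIDE == SZ_2K);

	return 0;
}
```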