KVM's arm64 world-switch assembly, `arch/arm64/kvm/hyp/entry.S`: the GPL boilerplate collapses into an SPDX tag, `asm/asm-offsets.h` and the local register-offset defines drop out, and `asm/kvm_ptrauth.h` comes in for the pointer-authentication key switching used below.

```diff
@@ -1,31 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Copyright (C) 2015 - ARM Ltd
  * Author: Marc Zyngier <marc.zyngier@arm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
  */
 
 #include <linux/linkage.h>
 
 #include <asm/alternative.h>
-#include <asm/asm-offsets.h>
 #include <asm/assembler.h>
 #include <asm/fpsimdmacros.h>
 #include <asm/kvm.h>
 #include <asm/kvm_arm.h>
 #include <asm/kvm_asm.h>
 #include <asm/kvm_mmu.h>
-
-#define CPU_GP_REG_OFFSET(x)	(CPU_GP_REGS + x)
-#define CPU_XREG_OFFSET(x)	CPU_GP_REG_OFFSET(CPU_USER_PT_REGS + 8*x)
+#include <asm/kvm_ptrauth.h>
 
```
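The offset arithmetic removed above is relocated rather than deleted: in mainline it lands in `asm/kvm_asm.h`, where the asm-offsets constants are already available (which is also why the `asm/asm-offsets.h` include can go). A sketch of the relocated defines, assuming the mainline layout; `CPU_SP_EL0_OFFSET` is the slot behind the new `save_sp_el0`/`restore_sp_el0` helpers used further down:

```asm
/* Sketch: the register-offset math as rehomed in asm/kvm_asm.h
 * (mainline layout assumed). CPU_USER_PT_REGS is an asm-offsets
 * constant; x30/lr and sp_el0 sit right after the GP registers. */
#define CPU_XREG_OFFSET(x)	(CPU_USER_PT_REGS + 8 * (x))
#define CPU_LR_OFFSET		CPU_XREG_OFFSET(30)
#define CPU_SP_EL0_OFFSET	(CPU_LR_OFFSET + 8)
```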
|---|
```diff
@@ -32,32 +18,5 @@
 	.text
-	.pushsection	.hyp.text, "ax"
 
 /*
- * We treat x18 as callee-saved as the host may use it as a platform
- * register (e.g. for shadow call stack).
+ * u64 __guest_enter(struct kvm_vcpu *vcpu);
  */
-.macro save_callee_saved_regs ctxt
-	str	x18,      [\ctxt, #CPU_XREG_OFFSET(18)]
-	stp	x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
-	stp	x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
-	stp	x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
-	stp	x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)]
-	stp	x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)]
-	stp	x29, lr,  [\ctxt, #CPU_XREG_OFFSET(29)]
-.endm
-
-.macro restore_callee_saved_regs ctxt
-	// We require \ctxt is not x18-x28
-	ldr	x18,      [\ctxt, #CPU_XREG_OFFSET(18)]
-	ldp	x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
-	ldp	x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
-	ldp	x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
-	ldp	x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)]
-	ldp	x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)]
-	ldp	x29, lr,  [\ctxt, #CPU_XREG_OFFSET(29)]
-.endm
-
-/*
- * u64 __guest_enter(struct kvm_vcpu *vcpu,
- *		     struct kvm_cpu_context *host_ctxt);
- */
```
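The callee-saved save/restore macros move to `asm/kvm_asm.h` essentially unchanged, and that header also grows the new helpers this diff relies on. A sketch of those, assuming the mainline definitions, where `kvm_hyp_ctxt` is a per-CPU `struct kvm_cpu_context` and `HOST_CONTEXT_VCPU` is an asm-offsets constant:

```asm
// Sketch of the new asm/kvm_asm.h helpers (mainline names assumed).
.macro save_sp_el0 ctxt, tmp
	mrs	\tmp, sp_el0
	str	\tmp, [\ctxt, #CPU_SP_EL0_OFFSET]
.endm

.macro restore_sp_el0 ctxt, tmp
	ldr	\tmp, [\ctxt, #CPU_SP_EL0_OFFSET]
	msr	sp_el0, \tmp
.endm

// The loaded vcpu is stashed in the per-CPU hyp context so that the
// panic path below can tell guest context apart from hyp context.
.macro get_loaded_vcpu vcpu, ctxt
	adr_this_cpu \ctxt, kvm_hyp_ctxt, \vcpu
	ldr	\vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
.endm

.macro set_loaded_vcpu vcpu, ctxt, tmp
	adr_this_cpu \ctxt, kvm_hyp_ctxt, \tmp
	str	\vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
.endm
```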
|---|
```diff
@@ -64,16 +23,20 @@
-ENTRY(__guest_enter)
+SYM_FUNC_START(__guest_enter)
 	// x0: vcpu
-	// x1: host context
-	// x2-x17: clobbered by macros
+	// x1-x17: clobbered by macros
 	// x29: guest context
 
-	// Store the host regs
+	adr_this_cpu x1, kvm_hyp_ctxt, x2
+
+	// Store the hyp regs
 	save_callee_saved_regs x1
 
-	// Now the host state is stored if we have a pending RAS SError it must
-	// affect the host. If any asynchronous exception is pending we defer
-	// the guest entry. The DSB isn't necessary before v8.2 as any SError
-	// would be fatal.
+	// Save hyp's sp_el0
+	save_sp_el0 x1, x2
+
+	// Now the hyp state is stored if we have a pending RAS SError it must
+	// affect the host or hyp. If any asynchronous exception is pending we
+	// defer the guest entry. The DSB isn't necessary before v8.2 as any
+	// SError would be fatal.
 alternative_if ARM64_HAS_RAS_EXTN
 	dsb	nshst
 	isb
```
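`alternative_if` is the arm64 boot-time patching machinery from `asm/alternative.h`: CPUs that advertise the named capability run the first block, everyone else runs the `alternative_else` block (or NOPs), and both sides must assemble to the same size. A toy sketch of the pattern, outside this file:

```asm
// Toy sketch of the arm64 alternatives pattern used above. Both
// branches are one instruction, so the sizes match as required.
alternative_if ARM64_HAS_RAS_EXTN
	esb	// v8.2 RAS: defer any pending SError into DISR_EL1
alternative_else
	nop	// pre-RAS cores: an SError here would be fatal anyway
alternative_endif
```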
|---|
```diff
@@ -84,7 +47,19 @@
 	ret
 
 1:
+	set_loaded_vcpu x0, x1, x2
+
 	add	x29, x0, #VCPU_CONTEXT
+
+	// Macro ptrauth_switch_to_guest format:
+	// 	ptrauth_switch_to_guest(guest cxt, tmp1, tmp2, tmp3)
+	// The below macro to restore guest keys is not implemented in C code
+	// as it may cause Pointer Authentication key signing mismatch errors
+	// when this feature is enabled for kernel code.
+	ptrauth_switch_to_guest x29, x0, x1, x2
+
+	// Restore the guest's sp_el0
+	restore_sp_el0 x29, x0
 
 	// Restore guest regs x0-x17
 	ldp	x0, x1, [x29, #CPU_XREG_OFFSET(0)]
```
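`ptrauth_switch_to_guest` comes from the newly included `asm/kvm_ptrauth.h`. It is a no-op unless the CPU has address authentication and the vcpu enables it via `HCR_EL2.{API,APK}`; otherwise it reloads the guest's key pairs. A minimal sketch of one key's restore, assuming the mainline sysreg names (the real `ptrauth_restore_state` covers all five keys: APIA, APIB, APDA, APDB, APGA):

```asm
// Minimal sketch: reload a single pointer-auth key pair from a saved
// context. Illustrative macro name; only the APIA key is shown.
.macro restore_apia_key base, reg1, reg2
	ldp	\reg1, \reg2, [\base]	// saved {APIAKEYLO, APIAKEYHI}
	msr_s	SYS_APIAKEYLO_EL1, \reg1
	msr_s	SYS_APIAKEYHI_EL1, \reg2
.endm
```

Doing this in assembly is deliberate: C code executing mid-switch could sign a return address with one key and authenticate it with another, which is exactly the mismatch the comment above warns about.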
|---|
```diff
@@ -103,9 +78,30 @@
 	// Do not touch any register after this!
 	eret
 	sb
-ENDPROC(__guest_enter)
 
-ENTRY(__guest_exit)
+SYM_INNER_LABEL(__guest_exit_panic, SYM_L_GLOBAL)
+	// x2-x29,lr: vcpu regs
+	// vcpu x0-x1 on the stack
+
+	// If the hyp context is loaded, go straight to hyp_panic
+	get_loaded_vcpu x0, x1
+	cbnz	x0, 1f
+	b	hyp_panic
+
+1:
+	// The hyp context is saved so make sure it is restored to allow
+	// hyp_panic to run at hyp and, subsequently, panic to run in the host.
+	// This makes use of __guest_exit to avoid duplication but sets the
+	// return address to tail call into hyp_panic. As a side effect, the
+	// current state is saved to the guest context but it will only be
+	// accurate if the guest had been completely restored.
+	adr_this_cpu x0, kvm_hyp_ctxt, x1
+	adr_l	x1, hyp_panic
+	str	x1, [x0, #CPU_XREG_OFFSET(30)]
+
+	get_vcpu_ptr	x1, x0
+
+SYM_INNER_LABEL(__guest_exit, SYM_L_GLOBAL)
 	// x0: return code
 	// x1: vcpu
 	// x2-x29,lr: vcpu regs
```
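`__guest_exit_panic` is entered from the EL2 vector stubs for exceptions that have no legitimate cause. A sketch of that wiring, modelled on mainline `hyp-entry.S` (macro and label names assumed):

```asm
// Sketch (hyp-entry.S): unexpected-exception stubs default to
// __guest_exit_panic, which picks guest vs hyp context at runtime.
.macro invalid_vector	label, target = __guest_exit_panic
	.align	2
SYM_CODE_START_LOCAL(\label)
	b	\target
SYM_CODE_END(\label)
.endm

	invalid_vector	el2t_sync_invalid
	invalid_vector	el2t_irq_invalid
```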
|---|
```diff
@@ -134,15 +130,30 @@
 	// Store the guest regs x18-x29, lr
 	save_callee_saved_regs x1
 
-	get_host_ctxt	x2, x3
+	// Store the guest's sp_el0
+	save_sp_el0	x1, x2
 
-	// Now restore the host regs
+	adr_this_cpu x2, kvm_hyp_ctxt, x3
+
+	// Macro ptrauth_switch_to_hyp format:
+	// 	ptrauth_switch_to_hyp(guest cxt, host cxt, tmp1, tmp2, tmp3)
+	// The below macro to save/restore keys is not implemented in C code
+	// as it may cause Pointer Authentication key signing mismatch errors
+	// when this feature is enabled for kernel code.
+	ptrauth_switch_to_hyp x1, x2, x3, x4, x5
+
+	// Restore hyp's sp_el0
+	restore_sp_el0 x2, x3
+
+	// Now restore the hyp regs
 	restore_callee_saved_regs x2
+
+	set_loaded_vcpu xzr, x2, x3
 
 alternative_if ARM64_HAS_RAS_EXTN
 	// If we have the RAS extensions we can consume a pending error
-	// without an unmask-SError and isb.
-	esb
+	// without an unmask-SError and isb. The ESB-instruction consumed any
+	// pending guest error when we took the exception from the guest.
 	mrs_s	x2, SYS_DISR_EL1
 	str	x2, [x1, #(VCPU_FAULT_DISR - VCPU_CONTEXT)]
 	cbz	x2, 1f
```
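The dropped `esb` is not lost either: on RAS-capable CPUs an error synchronization barrier in the guest-exit vector preamble has already deferred any pending guest SError into `DISR_EL1` by the time this code runs, so reading the register is enough. A generic sketch of the v8.2 deferral pattern, independent of this file:

```asm
// Generic sketch of the v8.2 RAS deferral pattern: after ESB retires,
// a pending SError is recorded in DISR_EL1 instead of being taken.
	esb				// error synchronization barrier
	mrs_s	x2, SYS_DISR_EL1	// read any deferred-error syndrome
	cbz	x2, 1f			// zero means no SError was pending
	// x2 now holds an ESR-format syndrome describing the SError
1:
```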
|---|
```diff
@@ -150,8 +161,16 @@
 	orr	x0, x0, #(1<<ARM_EXIT_WITH_SERROR_BIT)
 1:	ret
 alternative_else
-	// If we have a pending asynchronous abort, now is the
-	// time to find out. From your VAXorcist book, page 666:
+	dsb	sy		// Synchronize against in-flight ld/st
+	isb			// Prevent an early read of side-effect free ISR
+	mrs	x2, isr_el1
+	tbnz	x2, #8, 2f	// ISR_EL1.A
+	ret
+	nop
+2:
+alternative_endif
+	// We know we have a pending asynchronous abort, now is the
+	// time to flush it out. From your VAXorcist book, page 666:
 	// "Threaten me not, oh Evil one! For I speak with
 	// the power of DEC, and I command thee to show thyself!"
 	mrs	x2, elr_el2
```
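The `tbnz x2, #8` test inspects `ISR_EL1`, which reports asynchronous exceptions pending at the current exception level; the bit positions are architectural:

```asm
// ISR_EL1 pending-exception bits (architectural numbering; the tbnz
// above tests bit 8). Names here are illustrative, not kernel defines.
#define ISR_EL1_A	(1 << 8)	/* SError pending */
#define ISR_EL1_I	(1 << 7)	/* IRQ pending */
#define ISR_EL1_F	(1 << 6)	/* FIQ pending */
```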
|---|
```diff
@@ -159,10 +178,7 @@
 	mrs	x4, spsr_el2
 	mov	x5, x0
 
-	dsb	sy		// Synchronize against in-flight ld/st
-	nop
 	msr	daifclr, #4	// Unmask aborts
-alternative_endif
 
 	// This is our single instruction exception window. A pending
 	// SError is guaranteed to occur at the earliest when we unmask
@@ -172,6 +188,7 @@
 	isb
 
 abort_guest_exit_end:
+
 	msr	daifset, #4	// Mask aborts
 	ret
 
```
|---|
```diff
@@ -188,4 +205,4 @@
 	msr	spsr_el2, x4
 	orr	x0, x0, x5
 1:	ret
-ENDPROC(__guest_exit)
+SYM_FUNC_END(__guest_enter)
```
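With `__guest_exit` now an inner label, the whole enter/exit path is a single `SYM_FUNC_START`/`SYM_FUNC_END` unit. Its callers are the guest-facing trap vectors, which set up the documented convention (`x0` = exit code, `x1` = vcpu) before branching in; a sketch, assuming the mainline `hyp-entry.S` labels:

```asm
// Sketch (hyp-entry.S): trapped guest exceptions load the vcpu pointer
// and an exit code, then funnel into __guest_exit.
el1_trap:
	get_vcpu_ptr	x1, x0
	mov	x0, #ARM_EXCEPTION_TRAP
	b	__guest_exit

el1_irq:
	get_vcpu_ptr	x1, x0
	mov	x0, #ARM_EXCEPTION_IRQ
	b	__guest_exit
```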
|---|