.. | .. |
---|
| 1 | +// SPDX-License-Identifier: GPL-2.0-only |
---|
1 | 2 | /* |
---|
2 | 3 | * Fault injection for both 32 and 64bit guests. |
---|
3 | 4 | * |
---|
.. | .. |
---|
7 | 8 | * Based on arch/arm/kvm/emulate.c |
---|
8 | 9 | * Copyright (C) 2012 - Virtual Open Systems and Columbia University |
---|
9 | 10 | * Author: Christoffer Dall <c.dall@virtualopensystems.com> |
---|
10 | | - * |
---|
11 | | - * This program is free software: you can redistribute it and/or modify |
---|
12 | | - * it under the terms of the GNU General Public License version 2 as |
---|
13 | | - * published by the Free Software Foundation. |
---|
14 | | - * |
---|
15 | | - * This program is distributed in the hope that it will be useful, |
---|
16 | | - * but WITHOUT ANY WARRANTY; without even the implied warranty of |
---|
17 | | - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
---|
18 | | - * GNU General Public License for more details. |
---|
19 | | - * |
---|
20 | | - * You should have received a copy of the GNU General Public License |
---|
21 | | - * along with this program. If not, see <http://www.gnu.org/licenses/>. |
---|
22 | 11 | */ |
---|
23 | 12 | |
---|
24 | 13 | #include <linux/kvm_host.h> |
---|
25 | 14 | #include <asm/kvm_emulate.h> |
---|
26 | 15 | #include <asm/esr.h> |
---|
27 | | - |
---|
28 | | -#define CURRENT_EL_SP_EL0_VECTOR 0x0 |
---|
29 | | -#define CURRENT_EL_SP_ELx_VECTOR 0x200 |
---|
30 | | -#define LOWER_EL_AArch64_VECTOR 0x400 |
---|
31 | | -#define LOWER_EL_AArch32_VECTOR 0x600 |
---|
32 | | - |
---|
33 | | -enum exception_type { |
---|
34 | | - except_type_sync = 0, |
---|
35 | | - except_type_irq = 0x80, |
---|
36 | | - except_type_fiq = 0x100, |
---|
37 | | - except_type_serror = 0x180, |
---|
38 | | -}; |
---|
39 | | - |
---|
40 | | -static u64 get_except_vector(struct kvm_vcpu *vcpu, enum exception_type type) |
---|
41 | | -{ |
---|
42 | | - u64 exc_offset; |
---|
43 | | - |
---|
44 | | - switch (*vcpu_cpsr(vcpu) & (PSR_MODE_MASK | PSR_MODE32_BIT)) { |
---|
45 | | - case PSR_MODE_EL1t: |
---|
46 | | - exc_offset = CURRENT_EL_SP_EL0_VECTOR; |
---|
47 | | - break; |
---|
48 | | - case PSR_MODE_EL1h: |
---|
49 | | - exc_offset = CURRENT_EL_SP_ELx_VECTOR; |
---|
50 | | - break; |
---|
51 | | - case PSR_MODE_EL0t: |
---|
52 | | - exc_offset = LOWER_EL_AArch64_VECTOR; |
---|
53 | | - break; |
---|
54 | | - default: |
---|
55 | | - exc_offset = LOWER_EL_AArch32_VECTOR; |
---|
56 | | - } |
---|
57 | | - |
---|
58 | | - return vcpu_read_sys_reg(vcpu, VBAR_EL1) + exc_offset + type; |
---|
59 | | -} |
---|
60 | | - |
---|
61 | | -/* |
---|
62 | | - * When an exception is taken, most PSTATE fields are left unchanged in the |
---|
63 | | - * handler. However, some are explicitly overridden (e.g. M[4:0]). Luckily all |
---|
64 | | - * of the inherited bits have the same position in the AArch64/AArch32 SPSR_ELx |
---|
65 | | - * layouts, so we don't need to shuffle these for exceptions from AArch32 EL0. |
---|
66 | | - * |
---|
67 | | - * For the SPSR_ELx layout for AArch64, see ARM DDI 0487E.a page C5-429. |
---|
68 | | - * For the SPSR_ELx layout for AArch32, see ARM DDI 0487E.a page C5-426. |
---|
69 | | - * |
---|
70 | | - * Here we manipulate the fields in order of the AArch64 SPSR_ELx layout, from |
---|
71 | | - * MSB to LSB. |
---|
72 | | - */ |
---|
73 | | -static unsigned long get_except64_pstate(struct kvm_vcpu *vcpu) |
---|
74 | | -{ |
---|
75 | | - unsigned long sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1); |
---|
76 | | - unsigned long old, new; |
---|
77 | | - |
---|
78 | | - old = *vcpu_cpsr(vcpu); |
---|
79 | | - new = 0; |
---|
80 | | - |
---|
81 | | - new |= (old & PSR_N_BIT); |
---|
82 | | - new |= (old & PSR_Z_BIT); |
---|
83 | | - new |= (old & PSR_C_BIT); |
---|
84 | | - new |= (old & PSR_V_BIT); |
---|
85 | | - |
---|
86 | | - // TODO: TCO (if/when ARMv8.5-MemTag is exposed to guests) |
---|
87 | | - |
---|
88 | | - new |= (old & PSR_DIT_BIT); |
---|
89 | | - |
---|
90 | | - // PSTATE.UAO is set to zero upon any exception to AArch64 |
---|
91 | | - // See ARM DDI 0487E.a, page D5-2579. |
---|
92 | | - |
---|
93 | | - // PSTATE.PAN is unchanged unless SCTLR_ELx.SPAN == 0b0 |
---|
94 | | - // SCTLR_ELx.SPAN is RES1 when ARMv8.1-PAN is not implemented |
---|
95 | | - // See ARM DDI 0487E.a, page D5-2578. |
---|
96 | | - new |= (old & PSR_PAN_BIT); |
---|
97 | | - if (!(sctlr & SCTLR_EL1_SPAN)) |
---|
98 | | - new |= PSR_PAN_BIT; |
---|
99 | | - |
---|
100 | | - // PSTATE.SS is set to zero upon any exception to AArch64 |
---|
101 | | - // See ARM DDI 0487E.a, page D2-2452. |
---|
102 | | - |
---|
103 | | - // PSTATE.IL is set to zero upon any exception to AArch64 |
---|
104 | | - // See ARM DDI 0487E.a, page D1-2306. |
---|
105 | | - |
---|
106 | | - // PSTATE.SSBS is set to SCTLR_ELx.DSSBS upon any exception to AArch64 |
---|
107 | | - // See ARM DDI 0487E.a, page D13-3258 |
---|
108 | | - if (sctlr & SCTLR_ELx_DSSBS) |
---|
109 | | - new |= PSR_SSBS_BIT; |
---|
110 | | - |
---|
111 | | - // PSTATE.BTYPE is set to zero upon any exception to AArch64 |
---|
112 | | - // See ARM DDI 0487E.a, pages D1-2293 to D1-2294. |
---|
113 | | - |
---|
114 | | - new |= PSR_D_BIT; |
---|
115 | | - new |= PSR_A_BIT; |
---|
116 | | - new |= PSR_I_BIT; |
---|
117 | | - new |= PSR_F_BIT; |
---|
118 | | - |
---|
119 | | - new |= PSR_MODE_EL1h; |
---|
120 | | - |
---|
121 | | - return new; |
---|
122 | | -} |
---|
123 | 16 | |
---|
124 | 17 | static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr) |
---|
125 | 18 | { |
---|
.. | .. |
---|
127 | 20 | bool is_aarch32 = vcpu_mode_is_32bit(vcpu); |
---|
128 | 21 | u32 esr = 0; |
---|
129 | 22 | |
---|
130 | | - vcpu_write_elr_el1(vcpu, *vcpu_pc(vcpu)); |
---|
131 | | - *vcpu_pc(vcpu) = get_except_vector(vcpu, except_type_sync); |
---|
132 | | - |
---|
133 | | - *vcpu_cpsr(vcpu) = get_except64_pstate(vcpu); |
---|
134 | | - vcpu_write_spsr(vcpu, cpsr); |
---|
| 23 | + vcpu->arch.flags |= (KVM_ARM64_EXCEPT_AA64_EL1 | |
---|
| 24 | + KVM_ARM64_EXCEPT_AA64_ELx_SYNC | |
---|
| 25 | + KVM_ARM64_PENDING_EXCEPTION); |
---|
135 | 26 | |
---|
136 | 27 | vcpu_write_sys_reg(vcpu, addr, FAR_EL1); |
---|
137 | 28 | |
---|
.. | .. |
---|
159 | 50 | |
---|
160 | 51 | static void inject_undef64(struct kvm_vcpu *vcpu) |
---|
161 | 52 | { |
---|
162 | | - unsigned long cpsr = *vcpu_cpsr(vcpu); |
---|
163 | 53 | u32 esr = (ESR_ELx_EC_UNKNOWN << ESR_ELx_EC_SHIFT); |
---|
164 | 54 | |
---|
165 | | - vcpu_write_elr_el1(vcpu, *vcpu_pc(vcpu)); |
---|
166 | | - *vcpu_pc(vcpu) = get_except_vector(vcpu, except_type_sync); |
---|
167 | | - |
---|
168 | | - *vcpu_cpsr(vcpu) = get_except64_pstate(vcpu); |
---|
169 | | - vcpu_write_spsr(vcpu, cpsr); |
---|
| 55 | + vcpu->arch.flags |= (KVM_ARM64_EXCEPT_AA64_EL1 | |
---|
| 56 | + KVM_ARM64_EXCEPT_AA64_ELx_SYNC | |
---|
| 57 | + KVM_ARM64_PENDING_EXCEPTION); |
---|
170 | 58 | |
---|
171 | 59 | /* |
---|
172 | 60 | * Build an unknown exception, depending on the instruction |
---|
.. | .. |
---|
178 | 66 | vcpu_write_sys_reg(vcpu, esr, ESR_EL1); |
---|
179 | 67 | } |
---|
180 | 68 | |
---|
/*
 * AArch32 fault-status encodings used to report an external abort to a
 * 32bit guest. The LPAE (long-descriptor) form is used when the guest's
 * TTBCR.EAE bit is set (checked via TCR_EL1 in inject_abt32()); the
 * nLPAE (short-descriptor) form otherwise.
 */
#define DFSR_FSC_EXTABT_LPAE	0x10
#define DFSR_FSC_EXTABT_nLPAE	0x08
#define DFSR_LPAE		BIT(9)
#define TTBCR_EAE		BIT(31)
---|
| 73 | + |
---|
| 74 | +static void inject_undef32(struct kvm_vcpu *vcpu) |
---|
| 75 | +{ |
---|
| 76 | + vcpu->arch.flags |= (KVM_ARM64_EXCEPT_AA32_UND | |
---|
| 77 | + KVM_ARM64_PENDING_EXCEPTION); |
---|
| 78 | +} |
---|
| 79 | + |
---|
/*
 * Modelled after TakeDataAbortException() and TakePrefetchAbortException
 * pseudocode.
 *
 * Primes the 32bit fault reporting registers and flags the abort as a
 * pending exception in vcpu->arch.flags; presumably the pending
 * exception is committed on the next guest entry — confirm against the
 * exception-commit path.
 */
static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt, u32 addr)
{
	u64 far;
	u32 fsr;

	/* Give the guest an IMPLEMENTATION DEFINED exception */
	if (vcpu_read_sys_reg(vcpu, TCR_EL1) & TTBCR_EAE) {
		/* Long-descriptor format selected: LPAE bit + LPAE FSC */
		fsr = DFSR_LPAE | DFSR_FSC_EXTABT_LPAE;
	} else {
		/* no need to shuffle FS[4] into DFSR[10] as its 0 */
		fsr = DFSR_FSC_EXTABT_nLPAE;
	}

	/* Preserve the half of FAR_EL1 that this abort does not own */
	far = vcpu_read_sys_reg(vcpu, FAR_EL1);

	if (is_pabt) {
		vcpu->arch.flags |= (KVM_ARM64_EXCEPT_AA32_IABT |
				     KVM_ARM64_PENDING_EXCEPTION);
		/* Fault address goes in the top half of FAR_EL1 (IFAR) */
		far &= GENMASK(31, 0);
		far |= (u64)addr << 32;
		/* Status for a prefetch abort is reported via IFSR32_EL2 */
		vcpu_write_sys_reg(vcpu, fsr, IFSR32_EL2);
	} else { /* !iabt */
		vcpu->arch.flags |= (KVM_ARM64_EXCEPT_AA32_DABT |
				     KVM_ARM64_PENDING_EXCEPTION);
		/* Fault address goes in the bottom half of FAR_EL1 (DFAR) */
		far &= GENMASK(63, 32);
		far |= addr;
		/* Status for a data abort lands in ESR_EL1 */
		vcpu_write_sys_reg(vcpu, fsr, ESR_EL1);
	}

	vcpu_write_sys_reg(vcpu, far, FAR_EL1);
}
---|
| 115 | + |
---|
181 | 116 | /** |
---|
182 | 117 | * kvm_inject_dabt - inject a data abort into the guest |
---|
183 | | - * @vcpu: The VCPU to receive the undefined exception |
---|
| 118 | + * @vcpu: The VCPU to receive the data abort |
---|
184 | 119 | * @addr: The address to report in the DFAR |
---|
185 | 120 | * |
---|
186 | 121 | * It is assumed that this code is called from the VCPU thread and that the |
---|
.. | .. |
---|
189 | 124 | void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr) |
---|
190 | 125 | { |
---|
191 | 126 | if (vcpu_el1_is_32bit(vcpu)) |
---|
192 | | - kvm_inject_dabt32(vcpu, addr); |
---|
| 127 | + inject_abt32(vcpu, false, addr); |
---|
193 | 128 | else |
---|
194 | 129 | inject_abt64(vcpu, false, addr); |
---|
195 | 130 | } |
---|
196 | 131 | |
---|
197 | 132 | /** |
---|
198 | 133 | * kvm_inject_pabt - inject a prefetch abort into the guest |
---|
199 | | - * @vcpu: The VCPU to receive the undefined exception |
---|
| 134 | + * @vcpu: The VCPU to receive the prefetch abort |
---|
200 | 135 | * @addr: The address to report in the IFAR |
---|
201 | 136 | * |
---|
202 | 137 | * It is assumed that this code is called from the VCPU thread and that the |
---|
.. | .. |
---|
205 | 140 | void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr) |
---|
206 | 141 | { |
---|
207 | 142 | if (vcpu_el1_is_32bit(vcpu)) |
---|
208 | | - kvm_inject_pabt32(vcpu, addr); |
---|
| 143 | + inject_abt32(vcpu, true, addr); |
---|
209 | 144 | else |
---|
210 | 145 | inject_abt64(vcpu, true, addr); |
---|
211 | 146 | } |
---|
212 | 147 | |
---|
213 | 148 | /** |
---|
214 | 149 | * kvm_inject_undefined - inject an undefined instruction into the guest |
---|
| 150 | + * @vcpu: The vCPU in which to inject the exception |
---|
215 | 151 | * |
---|
216 | 152 | * It is assumed that this code is called from the VCPU thread and that the |
---|
217 | 153 | * VCPU therefore is not currently executing guest code. |
---|
.. | .. |
---|
void kvm_inject_undefined(struct kvm_vcpu *vcpu)
{
	/* Dispatch on the register width of the guest's EL1 */
	if (!vcpu_el1_is_32bit(vcpu))
		inject_undef64(vcpu);
	else
		inject_undef32(vcpu);
}
---|